about summary refs log tree commit diff stats
path: root/bash
diff options
context:
space:
mode:
Diffstat (limited to 'bash')
-rwxr-xr-xbash/acme-stuff/acmetodo (renamed from bash/acmetodo)0
-rwxr-xr-xbash/acme-stuff/acmetodo-add (renamed from bash/acmetodo-add)0
-rwxr-xr-xbash/acme-stuff/acmetodo-all (renamed from bash/acmetodo-all)0
-rwxr-xr-xbash/acme-stuff/acmetodo-done (renamed from bash/acmetodo-done)0
-rw-r--r--bash/acme-stuff/acmetodo-filter (renamed from bash/acmetodo-filter)0
-rwxr-xr-xbash/acme-stuff/acmetodo-inprogress (renamed from bash/acmetodo-inprogress)0
-rwxr-xr-xbash/acme-stuff/acmetodo-todo (renamed from bash/acmetodo-todo)0
-rwxr-xr-xbash/acme-stuff/acmetodo-toggle (renamed from bash/acmetodo-toggle)0
-rwxr-xr-xbash/computer295
-rwxr-xr-xbash/consensus373
-rwxr-xr-xbash/critique (renamed from bash/dds)49
-rwxr-xr-xbash/exploration246
-rwxr-xr-xbash/logging.sh146
-rwxr-xr-xbash/metrics18
-rwxr-xr-xbash/peer-review259
-rwxr-xr-xbash/socratic213
-rwxr-xr-xbash/synthesis240
-rwxr-xr-xbash/unit-conversion/c-2-f (renamed from bash/c-2-f)0
-rwxr-xr-xbash/unit-conversion/f-2-c (renamed from bash/f-2-c)0
19 files changed, 1832 insertions, 7 deletions
diff --git a/bash/acmetodo b/bash/acme-stuff/acmetodo
index 0c0b72f..0c0b72f 100755
--- a/bash/acmetodo
+++ b/bash/acme-stuff/acmetodo
diff --git a/bash/acmetodo-add b/bash/acme-stuff/acmetodo-add
index b40663d..b40663d 100755
--- a/bash/acmetodo-add
+++ b/bash/acme-stuff/acmetodo-add
diff --git a/bash/acmetodo-all b/bash/acme-stuff/acmetodo-all
index c00bb9b..c00bb9b 100755
--- a/bash/acmetodo-all
+++ b/bash/acme-stuff/acmetodo-all
diff --git a/bash/acmetodo-done b/bash/acme-stuff/acmetodo-done
index 4829331..4829331 100755
--- a/bash/acmetodo-done
+++ b/bash/acme-stuff/acmetodo-done
diff --git a/bash/acmetodo-filter b/bash/acme-stuff/acmetodo-filter
index 6149207..6149207 100644
--- a/bash/acmetodo-filter
+++ b/bash/acme-stuff/acmetodo-filter
diff --git a/bash/acmetodo-inprogress b/bash/acme-stuff/acmetodo-inprogress
index d5ea505..d5ea505 100755
--- a/bash/acmetodo-inprogress
+++ b/bash/acme-stuff/acmetodo-inprogress
diff --git a/bash/acmetodo-todo b/bash/acme-stuff/acmetodo-todo
index 6149207..6149207 100755
--- a/bash/acmetodo-todo
+++ b/bash/acme-stuff/acmetodo-todo
diff --git a/bash/acmetodo-toggle b/bash/acme-stuff/acmetodo-toggle
index bffccec..bffccec 100755
--- a/bash/acmetodo-toggle
+++ b/bash/acme-stuff/acmetodo-toggle
diff --git a/bash/computer b/bash/computer
new file mode 100755
index 0000000..e5aa36d
--- /dev/null
+++ b/bash/computer
@@ -0,0 +1,295 @@
+#!/bin/bash
+
+# Get the directory where this script is located
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+# Computer Dispatch System
+# This script intelligently routes prompts to the most appropriate thinking mechanism
+# or directly to Ollama based on complexity, question type, and user intent.
+#
+# APPLICATION LOGIC:
+# The computer dispatch system implements an intelligent routing mechanism that
+# analyzes user prompts and determines the optimal response strategy. The system
+# operates through three distinct phases designed to maximize response quality:
+#
+# PHASE 1 - PROMPT ANALYSIS:
+#   - Analyzes prompt complexity, length, and question type
+#   - Identifies user intent and specific keywords
+#   - Determines if direct Ollama response is appropriate
+#   - Classifies prompts into response categories
+#
+# PHASE 2 - MECHANISM SELECTION:
+#   - Routes to appropriate thinking mechanism based on classification
+#   - Uses decision tree with keywords for clear cases
+#   - Considers prompt complexity and user intent
+#   - Falls back to direct Ollama for simple cases
+#
+# PHASE 3 - RESPONSE EXECUTION:
+#   - Executes the selected mechanism or direct Ollama call
+#   - Maintains transparency about the routing decision
+#   - Provides consistent output format regardless of mechanism
+#   - Logs the decision process for analysis
+#
+# DISPATCH MODELING:
+# The system applies intelligent routing principles to AI response generation:
+#   - Prompt classification helps match complexity to appropriate mechanism
+#   - Keyword analysis identifies specific user needs and intent
+#   - Decision tree provides consistent, predictable routing logic
+#   - Direct Ollama routing handles simple cases efficiently
+#   - Transparency shows users how their prompt was processed
+#   - The system may improve response quality by using specialized mechanisms
+#
+# The dispatch process emphasizes efficiency and appropriateness,
+# ensuring users get the best possible response for their specific needs.
+# The system balances speed with depth based on prompt characteristics.
+
+# --- Model Configuration ---
+DEFAULT_MODEL="gemma3n:e2b"
+
+# --- Defaults ---
+DEFAULT_ROUNDS=2
+
+# --- Argument Validation ---
+if [ "$#" -lt 1 ]; then
+    echo -e "\n\tComputer"
+    echo -e "\tThis script intelligently routes prompts to the most appropriate thinking mechanism"
+    echo -e "\tor directly to Ollama based on complexity, question type, and user intent."
+    echo -e "\n\tUsage: $0 [-f <file_path>] [-d] \"<your prompt>\" [number_of_rounds]"
+    echo -e "\n\tExample: $0 -f ./input.txt \"Please analyze this text\" 2"
+    echo -e "\n\tIf number_of_rounds is not provided, the program will default to $DEFAULT_ROUNDS rounds."
+    echo -e "\n\t-f <file_path> (optional): Append the contents of the file to the prompt."
+    echo -e "\n\t-d (optional): Force direct Ollama response (bypass thinking mechanisms)."
+    echo -e "\n"
+    exit 1
+fi
+
+# --- Argument Parsing ---
+FILE_PATH=""
+FORCE_DIRECT=false
+while getopts "f:d" opt; do
+  case $opt in
+    f)
+      FILE_PATH="$OPTARG"
+      ;;
+    d)
+      FORCE_DIRECT=true
+      ;;
+    *)
+      echo "Invalid option: -$OPTARG" >&2
+      exit 1
+      ;;
+  esac
+done
+shift $((OPTIND -1))
+
+PROMPT="$1"
+if [ -z "$2" ]; then
+    ROUNDS=$DEFAULT_ROUNDS
+else
+    ROUNDS=$2
+fi
+
+# If file path is provided, append its contents to the prompt
+if [ -n "$FILE_PATH" ]; then
+    if [ ! -f "$FILE_PATH" ]; then
+        echo "File not found: $FILE_PATH" >&2
+        exit 1
+    fi
+    FILE_CONTENTS=$(cat "$FILE_PATH")
+    PROMPT="$PROMPT\n[FILE CONTENTS]\n$FILE_CONTENTS\n[END FILE]"
+fi
+
+# Source the logging system using absolute path
+source "${SCRIPT_DIR}/logging.sh"
+
+# --- File Initialization ---
+# Create a temporary directory if it doesn't exist
+mkdir -p ~/tmp
+# Create a unique file for this session based on the timestamp
+SESSION_FILE=~/tmp/computer_$(date +%Y%m%d_%H%M%S).txt
+
+# Initialize timing
+SESSION_ID=$(generate_session_id)
+start_timer "$SESSION_ID" "computer"
+
+echo "Computer Dispatch Session Log: ${SESSION_FILE}"
+echo "---------------------------------"
+
+# Store the initial user prompt in the session file
+echo "USER PROMPT: ${PROMPT}" >> "${SESSION_FILE}"
+echo "FORCE DIRECT: ${FORCE_DIRECT}" >> "${SESSION_FILE}"
+echo "" >> "${SESSION_FILE}"
+
+# --- Prompt Analysis Function ---
+analyze_prompt() {
+    local prompt="$1"
+    local analysis=""
+    
+    # Check for direct Ollama requests
+    if [[ "$prompt" =~ (direct|simple|quick|fast|straight) ]]; then
+        echo "DIRECT"
+        return
+    fi
+    
+    # Check prompt length (simple heuristic for complexity)
+    local word_count=$(echo "$prompt" | wc -w)
+    
+    # Very short prompts (likely simple questions)
+    if [ "$word_count" -le 5 ]; then
+        echo "DIRECT"
+        return
+    fi
+    
+    # Keyword-based classification
+    if [[ "$prompt" =~ (consensus|agree|disagree|vote|multiple|perspectives|opinions) ]]; then
+        analysis="CONSENSUS"
+    elif [[ "$prompt" =~ (synthesize|combine|integrate|unify|merge|consolidate) ]]; then
+        analysis="SYNTHESIS"
+    elif [[ "$prompt" =~ (explore|paths|alternatives|options|compare|strategies|approaches) ]]; then
+        analysis="EXPLORATION"
+    elif [[ "$prompt" =~ (analyze|examine|explore|investigate|deep|thorough|comprehensive) ]]; then
+        analysis="SOCRATIC"
+    elif [[ "$prompt" =~ (improve|refine|edit|revise|better|enhance|polish|fix) ]]; then
+        analysis="CRITIQUE"
+    elif [[ "$prompt" =~ (review|feedback|peer|collaborate|suggest|advice) ]]; then
+        analysis="PEER_REVIEW"
+    else
+        # Default to direct for unclear cases
+        analysis="DIRECT"
+    fi
+    
+    echo "$analysis"
+}
+
+# --- Mechanism Selection ---
+echo "Analyzing prompt and selecting mechanism..."
+echo "PROMPT ANALYSIS:" >> "${SESSION_FILE}"
+
+if [ "$FORCE_DIRECT" = true ]; then
+    MECHANISM="DIRECT"
+    REASON="User requested direct response with -d flag"
+else
+    MECHANISM=$(analyze_prompt "$PROMPT")
+    case "$MECHANISM" in
+        "DIRECT")
+            REASON="Simple prompt or direct request"
+            ;;
+        "CONSENSUS")
+            REASON="Multiple perspectives or consensus needed"
+            ;;
+        "SYNTHESIS")
+            REASON="Integration of multiple approaches needed"
+            ;;
+        "EXPLORATION")
+            REASON="Systematic exploration of alternatives needed"
+            ;;
+        "SOCRATIC")
+            REASON="Deep analysis or exploration required"
+            ;;
+        "CRITIQUE")
+            REASON="Improvement or refinement requested"
+            ;;
+        "PEER_REVIEW")
+            REASON="Collaborative review or feedback needed"
+            ;;
+        *)
+            REASON="Default fallback"
+            MECHANISM="DIRECT"
+            ;;
+    esac
+fi
+
+echo "Selected mechanism: ${MECHANISM}" >> "${SESSION_FILE}"
+echo "Reason: ${REASON}" >> "${SESSION_FILE}"
+echo "" >> "${SESSION_FILE}"
+
+echo "Selected mechanism: ${MECHANISM}"
+echo "Reason: ${REASON}"
+echo "---------------------------------"
+
+# --- Response Execution ---
+echo "Executing selected mechanism..."
+echo "RESPONSE EXECUTION:" >> "${SESSION_FILE}"
+
+case "$MECHANISM" in
+    "DIRECT")
+        echo "Using direct Ollama response..."
+        echo "DIRECT OLLAMA RESPONSE:" >> "${SESSION_FILE}"
+        
+        DIRECT_PROMPT="You are an expert assistant. You always flag if you don't know something. Please provide a clear, helpful response to the following prompt: ${PROMPT}"
+        
+        RESPONSE=$(ollama run "${DEFAULT_MODEL}" "${DIRECT_PROMPT}")
+        
+        echo "${RESPONSE}" >> "${SESSION_FILE}"
+        echo "" >> "${SESSION_FILE}"
+        
+        echo "---------------------------------"
+        echo "Direct response:"
+        echo "---------------------------------"
+        echo "${RESPONSE}"
+        ;;
+        
+    "CONSENSUS")
+        echo "Delegating to consensus mechanism..."
+        echo "DELEGATING TO CONSENSUS:" >> "${SESSION_FILE}"
+        
+        # Execute consensus script and display output directly
+        "${SCRIPT_DIR}/consensus" "${PROMPT}" "${ROUNDS}" 2>&1 | tee -a "${SESSION_FILE}"
+        ;;
+        
+    "SOCRATIC")
+        echo "Delegating to Socratic mechanism..."
+        echo "DELEGATING TO SOCRATIC:" >> "${SESSION_FILE}"
+        
+        # Execute Socratic script and display output directly
+        "${SCRIPT_DIR}/socratic" "${PROMPT}" "${ROUNDS}" 2>&1 | tee -a "${SESSION_FILE}"
+        ;;
+        
+    "CRITIQUE")
+        echo "Delegating to critique mechanism..."
+        echo "DELEGATING TO CRITIQUE:" >> "${SESSION_FILE}"
+        
+        # Execute critique script and display output directly
+        "${SCRIPT_DIR}/critique" "${PROMPT}" "${ROUNDS}" 2>&1 | tee -a "${SESSION_FILE}"
+        ;;
+        
+        "PEER_REVIEW")
+        echo "Delegating to peer-review mechanism..."
+        echo "DELEGATING TO PEER_REVIEW:" >> "${SESSION_FILE}"
+        
+        # Execute peer-review script and display output directly
+        "${SCRIPT_DIR}/peer-review" "${PROMPT}" "${ROUNDS}" 2>&1 | tee -a "${SESSION_FILE}"
+        ;;
+        
+        "SYNTHESIS")
+        echo "Delegating to synthesis mechanism..."
+        echo "DELEGATING TO SYNTHESIS:" >> "${SESSION_FILE}"
+        
+        # Execute synthesis script and display output directly
+        "${SCRIPT_DIR}/synthesis" "${PROMPT}" "${ROUNDS}" 2>&1 | tee -a "${SESSION_FILE}"
+        ;;
+        
+        "EXPLORATION")
+        echo "Delegating to exploration mechanism..."
+        echo "DELEGATING TO EXPLORATION:" >> "${SESSION_FILE}"
+        
+        # Execute exploration script and display output directly
+        "${SCRIPT_DIR}/exploration" "${PROMPT}" "${ROUNDS}" 2>&1 | tee -a "${SESSION_FILE}"
+        ;;
+esac
+
+# --- Final Summary ---
+echo "" >> "${SESSION_FILE}"
+echo "DISPATCH SUMMARY:" >> "${SESSION_FILE}"
+echo "================" >> "${SESSION_FILE}"
+echo "Original Prompt: ${PROMPT}" >> "${SESSION_FILE}"
+echo "Selected Mechanism: ${MECHANISM}" >> "${SESSION_FILE}"
+echo "Reason: ${REASON}" >> "${SESSION_FILE}"
+echo "Rounds: ${ROUNDS}" >> "${SESSION_FILE}"
+
+# End timing
+duration=$(end_timer "$SESSION_ID" "computer")
+
+echo ""
+echo "Execution time: ${duration} seconds"
+echo "Full dispatch log: ${SESSION_FILE}" 
\ No newline at end of file
diff --git a/bash/consensus b/bash/consensus
new file mode 100755
index 0000000..f978614
--- /dev/null
+++ b/bash/consensus
@@ -0,0 +1,373 @@
+#!/bin/bash
+
+# Consensus System
+# This script uses multiple LLM models to achieve consensus on a response through voting.
+#
+# APPLICATION LOGIC:
+# The consensus process uses a multi-round voting system where multiple AI models
+# attempt to reach agreement on a response. The system operates through four phases
+# designed to reduce bias and improve reliability:
+#
+# PHASE 1 - RESPONSE GENERATION:
+#   - Models independently generate responses to avoid identical outputs
+#   - Self-assessment of confidence provides internal quality indicators
+#   - Different model architectures may produce varied perspectives
+#   - Robust extraction handles formatting inconsistencies
+#
+# PHASE 2 - CONFIDENCE VALIDATION:
+#   - A randomly selected judge model provides external quality assessment
+#   - Random selection helps prevent bias toward any particular model
+#   - External validation may catch overconfident self-assessments
+#   - Quality control through independent review
+#
+# PHASE 3 - CROSS-MODEL VOTING:
+#   - Each model evaluates others' work, creating a peer-review system
+#   - Exclusion of self-voting prevents self-preference bias
+#   - Collective evaluation uses different model perspectives
+#   - Voting process distributes decision-making across models
+#
+# PHASE 4 - CONSENSUS DETERMINATION:
+#   - >50% threshold requires majority agreement rather than plurality
+#   - Fallback mechanisms provide output even when consensus isn't reached
+#   - Transparent vote counting shows decision process
+#   - Caveats indicate when consensus wasn't reached
+#
+# CONSENSUS MODELING:
+# The system applies voting principles to AI model collaboration:
+#   - Random judge selection helps reduce systematic bias
+#   - Collective decision-making may reduce individual model errors
+#   - Peer review provides multiple evaluation perspectives
+#   - Transparency shows how decisions were made
+#   - Iterative rounds may improve response quality
+#   - Error handling addresses model inconsistencies
+#
+# The consensus threshold (>50%) requires majority agreement,
+# while random judge selection helps prevent single-model dominance.
+# The system emphasizes transparency and reliability in the decision process.
+
+# --- Model Configuration ---
+MODELS=(
+    "llama3:8b-instruct-q4_K_M"
+    "phi3:3.8b-mini-4k-instruct-q4_K_M"
+    "deepseek-r1:1.5b"
+    "gemma3n:e2b"
+    "dolphin3:latest"
+)
+
+# Randomly select judge model from available models
+JUDGE_MODEL="${MODELS[$((RANDOM % ${#MODELS[@]}))]}"
+
+# --- Defaults ---
+DEFAULT_ROUNDS=2
+
+# --- Argument Validation ---
+if [ "$#" -lt 1 ]; then
+    echo -e "\n\tConsensus"
+    echo -e "\tThis script uses multiple LLM models to achieve consensus through voting."
+    echo -e "\n\tUsage: $0 [-f <file_path>] \"<your prompt>\" [number_of_rounds]"
+    echo -e "\n\tExample: $0 -f ./input.txt \"Please summarize this text file\" 2"
+    echo -e "\n\tIf number_of_rounds is not provided, the program will default to $DEFAULT_ROUNDS rounds."
+    echo -e "\n\t-f <file_path> (optional): Append the contents of the file to the prompt."
+    echo -e "\n"
+    exit 1
+fi
+
+# --- Argument Parsing ---
+FILE_PATH=""
+while getopts "f:" opt; do
+  case $opt in
+    f)
+      FILE_PATH="$OPTARG"
+      ;;
+    *)
+      echo "Invalid option: -$OPTARG" >&2
+      exit 1
+      ;;
+  esac
+done
+shift $((OPTIND -1))
+
+PROMPT="$1"
+if [ -z "$2" ]; then
+    ROUNDS=$DEFAULT_ROUNDS
+else
+    ROUNDS=$2
+fi
+
+# If file path is provided, append its contents to the prompt
+if [ -n "$FILE_PATH" ]; then
+    if [ ! -f "$FILE_PATH" ]; then
+        echo "File not found: $FILE_PATH" >&2
+        exit 1
+    fi
+    FILE_CONTENTS=$(cat "$FILE_PATH")
+    PROMPT="$PROMPT\n[FILE CONTENTS]\n$FILE_CONTENTS\n[END FILE]"
+fi
+
+# --- File Initialization ---
+# Create a temporary directory if it doesn't exist
+mkdir -p ~/tmp
+# Create a unique file for this session based on the timestamp
+SESSION_FILE=~/tmp/consensus_$(date +%Y%m%d_%H%M%S).txt
+
+echo "Consensus Session Log: ${SESSION_FILE}"
+echo "---------------------------------"
+echo "Judge model selected: ${JUDGE_MODEL}"
+echo "---------------------------------"
+
+# Store the initial user prompt in the session file
+echo "USER PROMPT: ${PROMPT}" >> "${SESSION_FILE}"
+echo "JUDGE MODEL: ${JUDGE_MODEL}" >> "${SESSION_FILE}"
+echo "" >> "${SESSION_FILE}"
+echo "Processing consensus with ${#MODELS[@]} models over ${ROUNDS} rounds..."
+
+# --- Consensus Rounds ---
+for round in $(seq 1 "${ROUNDS}"); do
+    echo "Starting consensus round ${round} of ${ROUNDS}..."
+    echo "ROUND ${round}:" >> "${SESSION_FILE}"
+    echo "================" >> "${SESSION_FILE}"
+    
+    # --- Step 1: Each model generates a response with confidence ---
+    echo "Step 1: Generating responses with confidence scores..."
+    echo "STEP 1 - MODEL RESPONSES:" >> "${SESSION_FILE}"
+    
+    declare -a responses
+    declare -a confidences
+    declare -a model_names
+    
+    for i in "${!MODELS[@]}"; do
+        model="${MODELS[$i]}"
+        echo "  Generating response from ${model}..."
+        
+        # Prompt for response with confidence
+        RESPONSE_PROMPT="You are an expert assistant. Please respond to the following prompt and provide your confidence level (strictly 'low', 'medium', or 'high') at the end of your response.
+
+PROMPT: ${PROMPT}
+
+IMPORTANT: Format your response exactly as follows:
+[RESPONSE]
+Your detailed response here...
+[CONFIDENCE]
+low
+
+OR
+
+[RESPONSE]
+Your detailed response here...
+[CONFIDENCE]
+medium
+
+OR
+
+[RESPONSE]
+Your detailed response here...
+[CONFIDENCE]
+high
+
+Make sure to include both [RESPONSE] and [CONFIDENCE] tags exactly as shown."
+
+        response_output=$(ollama run "${model}" "${RESPONSE_PROMPT}")
+        
+        # Extract response and confidence
+        response_text=$(echo "${response_output}" | sed -n '/\[RESPONSE\]/,/\[CONFIDENCE\]/p' | sed '1d;$d' | sed '$d')
+        
+        # If response extraction failed, use the full output (excluding confidence line)
+        if [ -z "$response_text" ]; then
+            response_text=$(echo "${response_output}" | sed '/\[CONFIDENCE\]/,$d' | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')
+        fi
+        
+        confidence=$(echo "${response_output}" | grep -A1 "\[CONFIDENCE\]" | tail -n1 | tr '[:upper:]' '[:lower:]' | xargs)
+        
+        # If confidence extraction failed, try alternative methods
+        if [ -z "$confidence" ]; then
+            confidence=$(echo "${response_output}" | grep -i "confidence" | tail -n1 | grep -o -i "\(low\|medium\|high\)" | head -n1)
+        fi
+        
+        # Validate confidence level
+        if [[ ! "$confidence" =~ ^(low|medium|high)$ ]]; then
+            confidence="medium"  # Default if invalid
+        fi
+        
+        # Store results
+        responses[$i]="${response_text}"
+        confidences[$i]="${confidence}"
+        model_names[$i]="${model}"
+        
+        # Debug: Check if response was extracted properly
+        if [ -z "${response_text}" ]; then
+            echo "  WARNING: Empty response extracted from ${model}" >&2
+        fi
+        
+        # Log to session file
+        echo "MODEL $((i+1)) (${model}):" >> "${SESSION_FILE}"
+        echo "Response: ${response_text}" >> "${SESSION_FILE}"
+        echo "Confidence: ${confidence}" >> "${SESSION_FILE}"
+        echo "" >> "${SESSION_FILE}"
+    done
+    
+    # --- Step 2: Judge validates confidence scores ---
+    echo "Step 2: Validating confidence scores..."
+    echo "STEP 2 - CONFIDENCE VALIDATION:" >> "${SESSION_FILE}"
+    
+    declare -a validated_confidences
+    
+    for i in "${!MODELS[@]}"; do
+        model="${MODELS[$i]}"
+        response="${responses[$i]}"
+        confidence="${confidences[$i]}"
+        
+        JUDGE_PROMPT="You are a judge evaluating confidence scores. Review this response and its claimed confidence level, then provide your own confidence assessment.
+
+RESPONSE: ${response}
+CLAIMED CONFIDENCE: ${confidence}
+
+Based on the quality, completeness, and accuracy of this response, what is your confidence level? Respond with only: low, medium, or high"
+
+        judge_output=$(ollama run "${JUDGE_MODEL}" "${JUDGE_PROMPT}")
+        judge_confidence=$(echo "${judge_output}" | tr '[:upper:]' '[:lower:]' | grep -o -i "\(low\|medium\|high\)" | head -n1)
+        
+        # Validate judge confidence
+        if [[ ! "$judge_confidence" =~ ^(low|medium|high)$ ]]; then
+            judge_confidence="medium"  # Default if invalid
+        fi
+        
+        validated_confidences[$i]="${judge_confidence}"
+        
+        echo "MODEL $((i+1)) (${model}):" >> "${SESSION_FILE}"
+        echo "  Claimed confidence: ${confidence}" >> "${SESSION_FILE}"
+        echo "  Validated confidence: ${judge_confidence}" >> "${SESSION_FILE}"
+        echo "" >> "${SESSION_FILE}"
+    done
+    
+    # --- Step 3: Models vote on best response ---
+    echo "Step 3: Models voting on best response..."
+    echo "STEP 3 - VOTING:" >> "${SESSION_FILE}"
+    
+    # Create voting prompt with all responses
+    voting_prompt="You are a voter in a consensus system. Below are responses from different models to the same prompt. Please vote for the BEST response by providing the model number (1-${#MODELS[@]}).
+
+ORIGINAL PROMPT: ${PROMPT}
+
+RESPONSES:"
+    
+    for i in "${!MODELS[@]}"; do
+        voting_prompt="${voting_prompt}
+
+MODEL $((i+1)) (${model_names[$i]}):
+${responses[$i]}
+Validated Confidence: ${validated_confidences[$i]}"
+    done
+    
+    voting_prompt="${voting_prompt}
+
+Please vote by responding with only the model number (1-${#MODELS[@]}) that you think provided the best response."
+
+    declare -a votes
+    declare -a vote_counts
+    
+    # Initialize vote counts
+    for i in "${!MODELS[@]}"; do
+        vote_counts[$i]=0
+    done
+    
+    # Each model votes
+    for i in "${!MODELS[@]}"; do
+        model="${MODELS[$i]}"
+        echo "  Getting vote from ${model}..."
+        
+        vote_output=$(ollama run "${model}" "${voting_prompt}")
+        vote=$(echo "${vote_output}" | grep -o '[0-9]\+' | head -1)
+        
+        # Validate vote
+        if [[ "$vote" =~ ^[0-9]+$ ]] && [ "$vote" -ge 1 ] && [ "$vote" -le "${#MODELS[@]}" ]; then
+            votes[$i]=$((vote - 1))  # Convert to 0-based index
+            vote_counts[$((vote - 1))]=$((${vote_counts[$((vote - 1))]} + 1))
+        else
+            votes[$i]=$i  # Default to voting for self if invalid
+            vote_counts[$i]=$((${vote_counts[$i]} + 1))
+        fi
+        
+        echo "MODEL ${i+1} (${model}) voted for MODEL $((votes[$i] + 1))" >> "${SESSION_FILE}"
+    done
+    
+    # --- Step 4: Determine consensus ---
+    echo "Step 4: Determining consensus..."
+    echo "STEP 4 - CONSENSUS DETERMINATION:" >> "${SESSION_FILE}"
+    
+    # Find the response with the most votes
+    max_votes=0
+    winning_model=-1
+    
+    for i in "${!MODELS[@]}"; do
+        if [ "${vote_counts[$i]}" -gt "$max_votes" ]; then
+            max_votes="${vote_counts[$i]}"
+            winning_model=$i
+        fi
+    done
+    
+    # Check if we have consensus (more than 50% of votes)
+    total_votes=${#MODELS[@]}
+    consensus_threshold=$((total_votes / 2 + 1))
+    
+    if [ "$max_votes" -ge "$consensus_threshold" ]; then
+        consensus_reached=true
+        consensus_message="CONSENSUS REACHED: Model $((winning_model + 1)) (${model_names[$winning_model]}) won with ${max_votes}/${total_votes} votes"
+    else
+        consensus_reached=false
+        consensus_message="NO CONSENSUS: Model $((winning_model + 1)) (${model_names[$winning_model]}) had highest votes (${max_votes}/${total_votes}) but consensus threshold is ${consensus_threshold}"
+    fi
+    
+    echo "Vote counts:" >> "${SESSION_FILE}"
+    for i in "${!MODELS[@]}"; do
+        echo "  Model $((i + 1)) (${model_names[$i]}): ${vote_counts[$i]} votes" >> "${SESSION_FILE}"
+    done
+    echo "" >> "${SESSION_FILE}"
+    echo "${consensus_message}" >> "${SESSION_FILE}"
+    echo "" >> "${SESSION_FILE}"
+    
+    # Store the winning response for next round or final output
+    if [ "$winning_model" -ge 0 ]; then
+        CURRENT_RESPONSE="${responses[$winning_model]}"
+        CURRENT_CONFIDENCE="${validated_confidences[$winning_model]}"
+        CURRENT_MODEL="${model_names[$winning_model]}"
+        
+        # Fallback: if winning response is empty, use the first non-empty response
+        if [ -z "$CURRENT_RESPONSE" ]; then
+            for i in "${!responses[@]}"; do
+                if [ -n "${responses[$i]}" ]; then
+                    CURRENT_RESPONSE="${responses[$i]}"
+                    CURRENT_CONFIDENCE="${validated_confidences[$i]}"
+                    CURRENT_MODEL="${model_names[$i]}"
+                    echo "  Using fallback response from ${CURRENT_MODEL}" >&2
+                    break
+                fi
+            done
+        fi
+    fi
+    
+    echo "Round ${round} complete: ${consensus_message}"
+    echo "" >> "${SESSION_FILE}"
+done
+
+# --- Final Output ---
+echo "---------------------------------"
+echo "Consensus process complete."
+echo "Final result:"
+echo "---------------------------------"
+
+# Print final summary
+echo "CONSENSUS SUMMARY:" >> "${SESSION_FILE}"
+echo "==================" >> "${SESSION_FILE}"
+echo "Final Answer: ${CURRENT_RESPONSE}" >> "${SESSION_FILE}"
+echo "Model: ${CURRENT_MODEL}" >> "${SESSION_FILE}"
+echo "Confidence: ${CURRENT_CONFIDENCE}" >> "${SESSION_FILE}"
+echo "Consensus Status: ${consensus_message}" >> "${SESSION_FILE}"
+
+echo "Final Answer:"
+echo "${CURRENT_RESPONSE}"
+echo ""
+echo "Model: ${CURRENT_MODEL}"
+echo "Confidence: ${CURRENT_CONFIDENCE}"
+echo "Consensus Status: ${consensus_message}"
+echo ""
+echo "Full session log: ${SESSION_FILE}" 
\ No newline at end of file
diff --git a/bash/dds b/bash/critique
index f14a79b..35a1db0 100755
--- a/bash/dds
+++ b/bash/critique
@@ -1,7 +1,42 @@
 #!/bin/bash
 
-# Daydreaming System
-# This script uses a sequence of LLM calls to refine an initial response.
+# Critique System
+# This script uses a sequence of LLM calls to refine an initial response through critique and revision.
+#
+# APPLICATION LOGIC:
+# The critique process implements an iterative refinement system where AI models
+# collaborate to improve response quality through critique and revision. The system
+# operates through three distinct phases designed to enhance clarity and accuracy:
+#
+# PHASE 1 - INITIAL RESPONSE GENERATION:
+#   - A response model generates the first answer to the user's prompt
+#   - The model is instructed to be honest about knowledge limitations
+#   - This creates a baseline response that can be improved through iteration
+#   - The initial response serves as the foundation for refinement
+#
+# PHASE 2 - CRITICAL REVIEW:
+#   - A critic model analyzes the current response for potential issues
+#   - The critic identifies misunderstandings, unclear areas, and improvement opportunities
+#   - Constructive feedback focuses on specific problems rather than general criticism
+#   - The critique provides targeted guidance for the refinement phase
+#
+# PHASE 3 - RESPONSE REFINEMENT:
+#   - A refine model incorporates the critique to generate an improved response
+#   - The refine model considers both the original prompt and the feedback
+#   - Iterative improvement may address clarity, accuracy, or completeness issues
+#   - Multiple refinement loops may progressively enhance response quality
+#
+# REFINEMENT MODELING:
+# The system applies iterative improvement principles to AI response generation:
+#   - Separate models for different roles may provide specialized perspectives
+#   - Critical review helps identify blind spots in the initial response
+#   - Iterative refinement allows for progressive improvement over multiple cycles
+#   - Transparency through logging shows the evolution of the response
+#   - The process may help catch errors or improve clarity that single-pass generation misses
+#
+# The refinement process continues for a configurable number of loops,
+# with each iteration potentially improving upon the previous response.
+# The system emphasizes quality improvement through structured feedback and revision.
 
 # --- Model Configuration ---
 RESPONSE_MODEL="llama3:8b-instruct-q4_K_M"
@@ -13,8 +48,8 @@ DEFAULT_LOOPS=2
 
 # --- Argument Validation ---
 if [ "$#" -lt 1 ]; then
-    echo -e "\n\tDaydreaming"
-    echo -e "\tThis script uses a sequence of LLM calls to refine an initial response."
+    echo -e "\n\tCritique"
+    echo -e "\tThis script uses a sequence of LLM calls to refine an initial response through critique and revision."
     echo -e "\n\tUsage: $0 [-f <file_path>] \"<your prompt>\" [number_of_refinement_loops]"
     echo -e "\n\tExample: $0 -f ./input.txt \"Please summarize this text file\" 2"
     echo -e "\n\tIf number_of_refinement_loops is not provided, the program will default to $DEFAULT_LOOPS loops."
@@ -59,9 +94,9 @@ fi
 # Create a temporary directory if it doesn't exist
 mkdir -p ~/tmp
 # Create a unique file for this session based on the timestamp
-SESSION_FILE=~/tmp/dds_$(date +%Y%m%d_%H%M%S).txt
+SESSION_FILE=~/tmp/critique_$(date +%Y%m%d_%H%M%S).txt
 
-echo "DDS Session Log: ${SESSION_FILE}"
+echo "Critique Session Log: ${SESSION_FILE}"
 echo "---------------------------------"
 
 # --- Initial Prompt & Response ---
@@ -113,7 +148,7 @@ done
 # --- Final Output ---
 
 echo "---------------------------------"
-echo "DDS process complete."
+echo "Critique process complete."
 echo "Final refined answer:"
 echo "---------------------------------"
 
diff --git a/bash/exploration b/bash/exploration
new file mode 100755
index 0000000..8dc09b7
--- /dev/null
+++ b/bash/exploration
@@ -0,0 +1,246 @@
+#!/bin/bash
+
+# Get the directory where this script is located
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+# Exploration System
+# This script systematically explores multiple solution paths and compares alternatives.
+#
+# APPLICATION LOGIC:
+# The exploration process implements a branching analysis system that systematically
+# explores multiple approaches to a problem and compares their merits. The system
+# operates through three distinct phases designed to maximize discovery and comparison:
+#
+# PHASE 1 - PATH GENERATION:
+#   - Identifies multiple possible approaches to the problem
+#   - Generates alternative solution paths
+#   - Ensures comprehensive coverage of the solution space
+#   - Creates a foundation for systematic comparison
+#
+# PHASE 2 - PATH EXPLORATION:
+#   - Explores each identified path in detail
+#   - Develops the implications and consequences of each approach
+#   - Identifies strengths, weaknesses, and trade-offs
+#   - Provides detailed analysis of each alternative
+#
+# PHASE 3 - COMPARATIVE ANALYSIS:
+#   - Systematically compares all explored paths
+#   - Evaluates relative merits and trade-offs
+#   - Identifies optimal approaches or combinations
+#   - Provides recommendations based on different criteria
+#
+# EXPLORATION MODELING:
+# The system applies systematic exploration principles to AI response generation:
+#   - Multiple paths ensure comprehensive problem coverage
+#   - Systematic comparison reveals optimal approaches
+#   - Trade-off analysis helps users make informed decisions
+#   - The process may reveal unexpected insights or approaches
+#   - Transparency shows how different paths were evaluated
+#   - The method may identify novel solutions that single-path approaches miss
+#
+# The exploration process emphasizes systematic analysis and comparison,
+# ensuring users understand the full range of possible approaches and their implications.
+
+# Source the logging system using absolute path
+source "${SCRIPT_DIR}/logging.sh"
+
+# --- Model Configuration ---
+EXPLORATION_MODEL="llama3:8b-instruct-q4_K_M"
+ANALYSIS_MODEL="phi3:3.8b-mini-4k-instruct-q4_K_M"
+
+# --- Defaults ---
+DEFAULT_PATHS=3
+
+# --- Argument Validation ---
+if [ "$#" -lt 1 ]; then
+    echo -e "\n\tExploration"
+    echo -e "\tThis script systematically explores multiple solution paths and compares alternatives."
+    echo -e "\n\tUsage: $0 [-f <file_path>] [-p <number_of_paths>] \"<your prompt>\" [number_of_rounds]"
+    echo -e "\n\tExample: $0 -f ./input.txt -p 4 \"How can we solve this problem?\" 2"
+    echo -e "\n\tIf number_of_rounds is not provided, the program will default to 2 rounds."
+    echo -e "\n\t-f <file_path> (optional): Append the contents of the file to the prompt."
+    echo -e "\n\t-p <paths> (optional): Number of solution paths to explore (default: 3)."
+    echo -e "\n"
+    exit 1
+fi
+
+# --- Argument Parsing ---
+FILE_PATH=""
+NUM_PATHS=$DEFAULT_PATHS
+while getopts "f:p:" opt; do
+  case $opt in
+    f)
+      FILE_PATH="$OPTARG"
+      ;;
+    p)
+      NUM_PATHS="$OPTARG"
+      ;;
+    *)
+      echo "Invalid option: -$OPTARG" >&2
+      exit 1
+      ;;
+  esac
+done
+shift $((OPTIND -1))
+
+PROMPT="$1"
+if [ -z "$2" ]; then
+    ROUNDS=2
+else
+    ROUNDS=$2
+fi
+
+# If file path is provided, append its contents to the prompt
+if [ -n "$FILE_PATH" ]; then
+    if [ ! -f "$FILE_PATH" ]; then
+        echo "File not found: $FILE_PATH" >&2
+        exit 1
+    fi
+    FILE_CONTENTS=$(cat "$FILE_PATH")
+    PROMPT="$PROMPT\n[FILE CONTENTS]\n$FILE_CONTENTS\n[END FILE]"
+fi
+
+# --- File Initialization ---
+mkdir -p ~/tmp
+SESSION_FILE=~/tmp/exploration_$(date +%Y%m%d_%H%M%S).txt
+
+# Initialize timing
+SESSION_ID=$(generate_session_id)
+start_timer "$SESSION_ID" "exploration"
+
+echo "Exploration Session Log: ${SESSION_FILE}"
+echo "---------------------------------"
+
+# Store the initial user prompt in the session file
+echo "USER PROMPT: ${PROMPT}" >> "${SESSION_FILE}"
+echo "NUMBER OF PATHS: ${NUM_PATHS}" >> "${SESSION_FILE}"
+echo "" >> "${SESSION_FILE}"
+
+# --- Phase 1: Path Generation ---
+echo "Phase 1: Generating solution paths..."
+echo "PHASE 1 - PATH GENERATION:" >> "${SESSION_FILE}"
+
+PATH_GENERATION_PROMPT="You are a strategic thinker. Your task is to identify ${NUM_PATHS} distinct, viable approaches to the following problem. Each path should represent a different strategy or methodology.
+
+PROBLEM: ${PROMPT}
+
+Please identify ${NUM_PATHS} different solution paths. For each path, provide:
+1. A clear name/title for the approach
+2. A brief description of the strategy
+3. The key principles or methodology it follows
+
+Format your response as:
+PATH 1: [Name]
+[Description]
+
+PATH 2: [Name]
+[Description]
+
+etc.
+
+Ensure the paths are genuinely different approaches, not just variations of the same idea."
+
+paths_output=$(ollama run "${EXPLORATION_MODEL}" "${PATH_GENERATION_PROMPT}")
+
+echo "GENERATED PATHS:" >> "${SESSION_FILE}"
+echo "${paths_output}" >> "${SESSION_FILE}"
+echo "" >> "${SESSION_FILE}"
+
+# --- Phase 2: Path Exploration ---
+echo "Phase 2: Exploring each path in detail..."
+echo "PHASE 2 - PATH EXPLORATION:" >> "${SESSION_FILE}"
+
+declare -a path_analyses
+declare -a path_names
+
+# Extract path names and explore each one
+for i in $(seq 1 "${NUM_PATHS}"); do
+    echo "  Exploring path ${i}..."
+    
+    # Extract the path description (simplified approach)
+    path_section=$(echo "${paths_output}" | sed -n "/PATH ${i}:/,/PATH $((i+1)):/p" | sed '$d')
+    if [ -z "$path_section" ]; then
+        path_section=$(echo "${paths_output}" | sed -n "/PATH ${i}:/,\$p")
+    fi
+    
+    # Generate path name from the section
+    path_name=$(echo "${path_section}" | head -n1 | sed 's/^PATH [0-9]*: //')
+    
+    EXPLORATION_PROMPT="You are a detailed analyst. Explore the following solution path in depth, considering its implications, requirements, and potential outcomes.
+
+ORIGINAL PROBLEM: ${PROMPT}
+
+PATH TO EXPLORE: ${path_section}
+
+Please provide a comprehensive analysis of this path including:
+1. Detailed implementation approach
+2. Potential benefits and advantages
+3. Potential challenges and risks
+4. Resource requirements and constraints
+5. Expected outcomes and timeline
+6. Success factors and critical elements
+7. Potential variations or adaptations
+
+Provide a thorough, well-structured analysis."
+
+    path_analysis=$(ollama run "${EXPLORATION_MODEL}" "${EXPLORATION_PROMPT}")
+    
+    path_analyses[$((i-1))]="${path_analysis}"
+    path_names[$((i-1))]="${path_name}"
+    
+    echo "PATH ${i} ANALYSIS (${path_name}):" >> "${SESSION_FILE}"
+    echo "${path_analysis}" >> "${SESSION_FILE}"
+    echo "" >> "${SESSION_FILE}"
+done
+
+# --- Phase 3: Comparative Analysis ---
+echo "Phase 3: Comparing and evaluating paths..."
+echo "PHASE 3 - COMPARATIVE ANALYSIS:" >> "${SESSION_FILE}"
+
+# Create comparison prompt
+COMPARISON_PROMPT="You are a strategic analyst. Compare and evaluate the following solution paths for the given problem.
+
+ORIGINAL PROBLEM: ${PROMPT}
+
+PATH ANALYSES:"
+
+for i in $(seq 0 $((NUM_PATHS-1))); do
+    COMPARISON_PROMPT="${COMPARISON_PROMPT}
+
+PATH $((i+1)): ${path_names[$i]}
+${path_analyses[$i]}"
+done
+
+COMPARISON_PROMPT="${COMPARISON_PROMPT}
+
+Please provide a comprehensive comparative analysis including:
+1. Direct comparison of approaches across key criteria
+2. Relative strengths and weaknesses of each path
+3. Trade-offs and opportunity costs
+4. Risk assessment for each approach
+5. Recommendations based on different scenarios
+6. Potential for combining elements from multiple paths
+7. Final recommendation with justification
+
+Provide a clear, structured comparison that helps decision-making."
+
+comparative_analysis=$(ollama run "${ANALYSIS_MODEL}" "${COMPARISON_PROMPT}")
+
+echo "COMPARATIVE ANALYSIS:" >> "${SESSION_FILE}"
+echo "${comparative_analysis}" >> "${SESSION_FILE}"
+
+# End timing
+duration=$(end_timer "$SESSION_ID" "exploration")
+
+# --- Final Output ---
+echo "---------------------------------"
+echo "Exploration process complete."
+echo "Comparative analysis:"
+echo "---------------------------------"
+
+echo "${comparative_analysis}"
+echo ""
+echo "Paths explored: ${NUM_PATHS}"
+echo "Execution time: ${duration} seconds"
+echo ""
+echo "Full exploration log: ${SESSION_FILE}" 
\ No newline at end of file
diff --git a/bash/logging.sh b/bash/logging.sh
new file mode 100755
index 0000000..c37aaf4
--- /dev/null
+++ b/bash/logging.sh
@@ -0,0 +1,146 @@
+#!/bin/bash
+
+# Unified Logging System
+# This script provides consistent logging and performance metrics across all thinking mechanisms.
+
+# --- Logging Configuration ---
+LOG_DIR=~/tmp/ai_thinking
+METRICS_FILE="${LOG_DIR}/performance_metrics.json"
+SESSION_LOG="${LOG_DIR}/session_$(date +%Y%m%d_%H%M%S).json"
+
+# Create logging directory
+mkdir -p "${LOG_DIR}"
+
+# --- Timing Functions ---
+start_timer() {
+    local session_id="$1"
+    local mechanism="$2"
+    local start_time=$(date +%s.%N)
+    
+    # Store start time
+    echo "$start_time" > "/tmp/${session_id}_start"
+    
+    # Log session start
+    log_session_start "$session_id" "$mechanism" "$start_time"
+}
+
+end_timer() {
+    local session_id="$1"
+    local mechanism="$2"
+    local end_time=$(date +%s.%N)
+    local start_time=$(cat "/tmp/${session_id}_start" 2>/dev/null || echo "$end_time")
+    
+    # Calculate duration
+    local duration=$(echo "$end_time - $start_time" | bc -l 2>/dev/null || echo "0")
+    
+    # Log session end
+    log_session_end "$session_id" "$mechanism" "$end_time" "$duration"
+    
+    # Clean up
+    rm -f "/tmp/${session_id}_start"
+    
+    echo "$duration"
+}
+
+# --- Session Logging ---
+log_session_start() {
+    local session_id="$1"
+    local mechanism="$2"
+    local start_time="$3"
+    
+    cat > "${SESSION_LOG}" << EOF
+{
+  "session_id": "${session_id}",
+  "mechanism": "${mechanism}",
+  "start_time": "${start_time}",
+  "prompt": "${PROMPT:-""}",
+  "status": "started"
+}
+EOF
+}
+
+log_session_end() {
+    local session_id="$1"
+    local mechanism="$2"
+    local end_time="$3"
+    local duration="$4"
+    
+    local saved_start_time=$(jq -r '.start_time' "${SESSION_LOG}" 2>/dev/null || echo "")  # read before '>' truncates the file
+    cat > "${SESSION_LOG}" << EOF
+{
+  "session_id": "${session_id}",
+  "mechanism": "${mechanism}",
+  "start_time": "${saved_start_time}",
+  "end_time": "${end_time}",
+  "duration": "${duration}",
+  "prompt": "${PROMPT:-""}",
+  "status": "completed"
+}
+EOF
+    
+    # Update metrics file
+    update_metrics "$mechanism" "$duration"
+}
+
+# --- Metrics Management ---
+update_metrics() {
+    local mechanism="$1"
+    local duration="$2"
+    
+    # Create metrics file if it doesn't exist
+    if [ ! -f "${METRICS_FILE}" ]; then
+        cat > "${METRICS_FILE}" << EOF
+{
+  "total_sessions": 0,
+  "mechanisms": {},
+  "average_durations": {}
+}
+EOF
+    fi
+    
+    # Update metrics using jq (if available) or simple text processing
+    if command -v jq >/dev/null 2>&1; then
+        # Use jq for proper JSON handling
+        local temp_file=$(mktemp)
+        jq --arg mechanism "$mechanism" --arg duration "$duration" '
+            .total_sessions += 1 |
+            .mechanisms[$mechanism] = (.mechanisms[$mechanism] // 0) + 1 |
+            .average_durations[$mechanism] = (
+                (.average_durations[$mechanism] // 0) * (.mechanisms[$mechanism] - 1) + ($duration | tonumber)
+            ) / .mechanisms[$mechanism]
+        ' "${METRICS_FILE}" > "$temp_file"
+        mv "$temp_file" "${METRICS_FILE}"
+    else
+        # Fallback: simple text-based metrics
+        echo "$(date +%Y%m%d_%H%M%S),${mechanism},${duration}" >> "${LOG_DIR}/simple_metrics.csv"
+    fi
+}
+
+# --- Utility Functions ---
+generate_session_id() {
+    echo "session_$(date +%Y%m%d_%H%M%S)_$$"
+}
+
+get_metrics_summary() {
+    if [ -f "${METRICS_FILE}" ]; then
+        echo "=== Performance Metrics ==="
+        if command -v jq >/dev/null 2>&1; then
+            jq -r '.mechanisms | to_entries[] | "\(.key): \(.value) sessions"' "${METRICS_FILE}"
+            echo ""
+            jq -r '.average_durations | to_entries[] | "\(.key): \(.value | tonumber | floor)s average"' "${METRICS_FILE}"
+        else
+            echo "Metrics available in: ${METRICS_FILE}"
+        fi
+    else
+        echo "No metrics available yet."
+    fi
+}
+
+# --- Export Functions for Other Scripts ---
+export -f start_timer
+export -f end_timer
+export -f log_session_start
+export -f log_session_end
+export -f update_metrics
+export -f generate_session_id
+export -f get_metrics_summary 
\ No newline at end of file
diff --git a/bash/metrics b/bash/metrics
new file mode 100755
index 0000000..ad430b5
--- /dev/null
+++ b/bash/metrics
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+# Metrics Viewer
+# This script displays performance metrics from the unified logging system.
+
+# Source the logging system (resolved relative to this script so it works from any CWD)
+source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/logging.sh"
+
+echo "AI Thinking System - Performance Metrics"
+echo "========================================"
+echo ""
+
+# Display metrics summary
+get_metrics_summary
+
+echo ""
+echo "Detailed metrics file: ${METRICS_FILE}"
+echo "Session logs directory: ${LOG_DIR}" 
\ No newline at end of file
diff --git a/bash/peer-review b/bash/peer-review
new file mode 100755
index 0000000..0a7be1a
--- /dev/null
+++ b/bash/peer-review
@@ -0,0 +1,259 @@
+#!/bin/bash
+
+# Peer Review System
+# This script implements a peer review process where one model provides an initial response
+# and other models review and suggest improvements through iterative refinement.
+#
+# APPLICATION LOGIC:
+# The peer review process implements a collaborative refinement system where AI models
+# provide structured feedback to improve response quality. The system operates through
+# three distinct phases designed to enhance clarity, accuracy, and completeness:
+#
+# PHASE 1 - INITIAL RESPONSE GENERATION:
+#   - A randomly selected model generates the first response to the user's prompt
+#   - Random selection helps prevent bias toward any particular model
+#   - The initial response serves as the foundation for peer review
+#   - This creates a starting point that can be improved through collective feedback
+#
+# PHASE 2 - PEER REVIEW:
+#   - Other models analyze the initial response and provide structured feedback
+#   - Reviewers suggest specific edits, clarifications, and improvements
+#   - Feedback focuses on clarity, completeness, accuracy, and organization
+#   - Multiple perspectives may identify different areas for improvement
+#
+# PHASE 3 - RESPONSE REFINEMENT:
+#   - The original responding model incorporates peer feedback to create an improved response
+#   - The model may revise, expand, clarify, or reorganize based on suggestions
+#   - Iterative improvement may address multiple rounds of feedback
+#   - The process emphasizes collaborative enhancement rather than replacement
+#
+# PEER REVIEW MODELING:
+# The system applies academic peer review principles to AI response refinement:
+#   - Random author selection helps prevent systematic bias in initial responses
+#   - Multiple reviewers provide diverse perspectives on the same work
+#   - Structured feedback focuses on specific improvements rather than general criticism
+#   - Author retains control over final revisions while considering peer input
+#   - Transparency through logging shows the evolution of the response
+#   - The process may help catch errors, improve clarity, or enhance completeness
+#
+# The peer review process continues for a configurable number of iterations,
+# with each cycle potentially improving upon the previous version.
+# The system emphasizes collaborative improvement through structured feedback and revision.
+
+# --- Model Configuration ---
+MODELS=(
+    "llama3:8b-instruct-q4_K_M"
+    "phi3:3.8b-mini-4k-instruct-q4_K_M"
+    "deepseek-r1:1.5b"
+    "gemma3n:e2b"
+    "dolphin3:latest"
+)
+
+# --- Defaults ---
+DEFAULT_ITERATIONS=1
+
+# --- Argument Validation ---
+if [ "$#" -lt 1 ]; then
+    echo -e "\n\tPeer Review"
+    echo -e "\tThis script implements a peer review process where one model provides an initial response"
+    echo -e "\tand other models review and suggest improvements through iterative refinement."
+    echo -e "\n\tUsage: $0 [-f <file_path>] \"<your prompt>\" [number_of_review_iterations]"
+    echo -e "\n\tExample: $0 -f ./input.txt \"Please analyze this text\" 1"
+    echo -e "\n\tIf number_of_review_iterations is not provided, the program will default to $DEFAULT_ITERATIONS iterations."
+    echo -e "\n\t-f <file_path> (optional): Append the contents of the file to the prompt."
+    echo -e "\n"
+    exit 1
+fi
+
+# --- Argument Parsing ---
+FILE_PATH=""
+while getopts "f:" opt; do
+  case $opt in
+    f)
+      FILE_PATH="$OPTARG"
+      ;;
+    *)
+      echo "Invalid option: -$OPTARG" >&2
+      exit 1
+      ;;
+  esac
+done
+shift $((OPTIND -1))
+
+PROMPT="$1"
+if [ -z "$2" ]; then
+    ITERATIONS=$DEFAULT_ITERATIONS
+else
+    ITERATIONS=$2
+fi
+
+# If file path is provided, append its contents to the prompt
+if [ -n "$FILE_PATH" ]; then
+    if [ ! -f "$FILE_PATH" ]; then
+        echo "File not found: $FILE_PATH" >&2
+        exit 1
+    fi
+    FILE_CONTENTS=$(cat "$FILE_PATH")
+    PROMPT="$PROMPT\n[FILE CONTENTS]\n$FILE_CONTENTS\n[END FILE]"
+fi
+
+# --- File Initialization ---
+# Create a temporary directory if it doesn't exist
+mkdir -p ~/tmp
+# Create a unique file for this session based on the timestamp
+SESSION_FILE=~/tmp/peer-review_$(date +%Y%m%d_%H%M%S).txt
+
+echo "Peer Review Session Log: ${SESSION_FILE}"
+echo "---------------------------------"
+
+# Store the initial user prompt in the session file
+echo "USER PROMPT: ${PROMPT}" >> "${SESSION_FILE}"
+echo "" >> "${SESSION_FILE}"
+echo "Processing peer review with ${ITERATIONS} iteration(s)..."
+
+# --- Random Author Selection ---
+AUTHOR_INDEX=$((RANDOM % ${#MODELS[@]}))
+AUTHOR_MODEL="${MODELS[$AUTHOR_INDEX]}"
+
+echo "Author model selected: ${AUTHOR_MODEL}"
+echo "AUTHOR MODEL: ${AUTHOR_MODEL}" >> "${SESSION_FILE}"
+echo "" >> "${SESSION_FILE}"
+
+# --- Initial Response Generation ---
+echo "Generating initial response..."
+echo "INITIAL RESPONSE GENERATION:" >> "${SESSION_FILE}"
+echo "============================" >> "${SESSION_FILE}"
+
+INITIAL_PROMPT="You are an expert assistant. Please provide a comprehensive response to the following prompt. Be thorough, clear, and well-organized in your response.
+
+PROMPT: ${PROMPT}"
+
+INITIAL_RESPONSE=$(ollama run "${AUTHOR_MODEL}" "${INITIAL_PROMPT}")
+
+echo "INITIAL RESPONSE (${AUTHOR_MODEL}):" >> "${SESSION_FILE}"
+echo "${INITIAL_RESPONSE}" >> "${SESSION_FILE}"
+echo "" >> "${SESSION_FILE}"
+
+# --- Peer Review Iterations ---
+CURRENT_RESPONSE="${INITIAL_RESPONSE}"
+
+for iteration in $(seq 1 "${ITERATIONS}"); do
+    echo "Starting peer review iteration ${iteration} of ${ITERATIONS}..."
+    echo "PEER REVIEW ITERATION ${iteration}:" >> "${SESSION_FILE}"
+    echo "=================================" >> "${SESSION_FILE}"
+    
+    # --- Step 1: Generate Peer Reviews ---
+    echo "Step 1: Generating peer reviews..."
+    echo "STEP 1 - PEER REVIEWS:" >> "${SESSION_FILE}"
+    
+    declare -a reviews
+    declare -a reviewer_names
+    
+    review_count=0
+    
+    for i in "${!MODELS[@]}"; do
+        # Skip the author model
+        if [ "$i" -eq "$AUTHOR_INDEX" ]; then
+            continue
+        fi
+        
+        model="${MODELS[$i]}"
+        echo "  Getting peer review from ${model}..."
+        
+        REVIEW_PROMPT="You are a peer reviewer. Your task is to provide constructive feedback on the following response. Focus on specific suggestions for improvement.
+
+REVIEW GUIDELINES:
+- Suggest specific edits or clarifications
+- Identify areas that could be made clearer or more concise
+- Point out any inaccuracies or missing information
+- Suggest improvements to organization or structure
+- Be constructive and specific in your feedback
+
+RESPONSE TO REVIEW: ${CURRENT_RESPONSE}
+
+Please provide your peer review feedback in a clear, structured format. Focus on actionable suggestions for improvement."
+
+        review_output=$(ollama run "${model}" "${REVIEW_PROMPT}")
+        
+        reviews[$review_count]="${review_output}"
+        reviewer_names[$review_count]="${model}"
+        
+        echo "REVIEW $((review_count + 1)) (${model}):" >> "${SESSION_FILE}"
+        echo "${review_output}" >> "${SESSION_FILE}"
+        echo "" >> "${SESSION_FILE}"
+        
+        review_count=$((review_count + 1))
+    done
+    
+    # --- Step 2: Generate Refined Response ---
+    echo "Step 2: Generating refined response based on peer feedback..."
+    echo "STEP 2 - RESPONSE REFINEMENT:" >> "${SESSION_FILE}"
+    
+    # Combine all reviews for the author
+    COMBINED_REVIEWS=""
+    for i in $(seq 0 $((review_count - 1))); do
+        COMBINED_REVIEWS="${COMBINED_REVIEWS}
+
+REVIEW FROM ${reviewer_names[$i]}:
+${reviews[$i]}"
+    done
+    
+    REFINE_PROMPT="You are the author of the following response. Your peers have provided constructive feedback to help improve your work. Please revise your response based on their suggestions.
+
+ORIGINAL PROMPT: ${PROMPT}
+YOUR CURRENT RESPONSE: ${CURRENT_RESPONSE}
+PEER REVIEW FEEDBACK: ${COMBINED_REVIEWS}
+
+Please provide a revised version of your response that:
+- Incorporates the constructive feedback from your peers
+- Addresses specific suggestions for improvement
+- Maintains your original insights while enhancing clarity and completeness
+- Shows how you've responded to the peer review process"
+
+    REFINED_RESPONSE=$(ollama run "${AUTHOR_MODEL}" "${REFINE_PROMPT}")
+    
+    echo "REFINED RESPONSE (${AUTHOR_MODEL}):" >> "${SESSION_FILE}"
+    echo "${REFINED_RESPONSE}" >> "${SESSION_FILE}"
+    echo "" >> "${SESSION_FILE}"
+    
+    # Update the current response for the next iteration
+    CURRENT_RESPONSE="${REFINED_RESPONSE}"
+    
+    echo "Peer review iteration ${iteration} complete."
+    echo "" >> "${SESSION_FILE}"
+done
+
+# --- Final Summary Generation ---
+echo "Generating final summary..."
+echo "FINAL SUMMARY GENERATION:" >> "${SESSION_FILE}"
+echo "========================" >> "${SESSION_FILE}"
+
+SUMMARY_PROMPT="You are an expert analyst. Based on the peer review process below, please provide a concise summary of the key improvements made and the overall quality of the final response.
+
+ORIGINAL PROMPT: ${PROMPT}
+FINAL REFINED RESPONSE: ${CURRENT_RESPONSE}
+
+Please provide a summary that:
+- Highlights the most significant improvements made through peer review
+- Notes the quality and effectiveness of the final response
+- Captures the collaborative nature of the peer review process
+- Is clear, concise, and well-organized"
+
+FINAL_SUMMARY=$(ollama run "${AUTHOR_MODEL}" "${SUMMARY_PROMPT}")
+
+echo "FINAL SUMMARY (${AUTHOR_MODEL}):" >> "${SESSION_FILE}"
+echo "${FINAL_SUMMARY}" >> "${SESSION_FILE}"
+
+# --- Final Output ---
+echo "---------------------------------"
+echo "Peer review process complete."
+echo "Final response:"
+echo "---------------------------------"
+
+echo "${CURRENT_RESPONSE}"
+echo ""
+echo "Author: ${AUTHOR_MODEL}"
+echo "Peer Review Summary:"
+echo "${FINAL_SUMMARY}"
+echo ""
+echo "Full peer review log: ${SESSION_FILE}" 
\ No newline at end of file
diff --git a/bash/socratic b/bash/socratic
new file mode 100755
index 0000000..8da0e3d
--- /dev/null
+++ b/bash/socratic
@@ -0,0 +1,213 @@
+#!/bin/bash
+
+# Socratic System
+# This script uses the Socratic method to refine responses through AI-generated questions and dialogue.
+#
+# APPLICATION LOGIC:
+# The Socratic process implements an iterative questioning system where AI models
+# engage in dialogue to explore, clarify, and refine responses. The system operates
+# through three distinct phases designed to deepen understanding and identify limitations:
+#
+# PHASE 1 - INITIAL RESPONSE GENERATION:
+#   - A response model generates the first answer to the user's prompt
+#   - The model provides a comprehensive initial response as the foundation
+#   - This creates the starting point for Socratic exploration
+#   - The response serves as the subject of subsequent questioning
+#
+# PHASE 2 - SOCRATIC QUESTIONING:
+#   - A question model analyzes the initial response and generates probing questions
+#   - Questions focus on clarifying assumptions, exploring implications, and considering alternatives
+#   - The question model identifies areas that need deeper examination
+#   - Questions are designed to reveal limitations, gaps, or unclear aspects
+#
+# PHASE 3 - RESPONSE REFINEMENT:
+#   - The original response model addresses the Socratic questions
+#   - The model may revise, expand, or clarify its initial response
+#   - This creates a dialogue that deepens the analysis
+#   - The process may reveal what cannot be determined or requires additional information
+#
+# SOCRATIC MODELING:
+# The system applies Socratic questioning principles to AI response refinement:
+#   - Separate models for questioning and responding may provide different perspectives
+#   - Probing questions help identify assumptions and limitations in the initial response
+#   - Iterative dialogue may reveal deeper insights or expose knowledge gaps
+#   - The process emphasizes intellectual honesty about what can and cannot be determined
+#   - Transparency through logging shows the evolution of understanding
+#   - The method may help catch overconfident claims or identify areas needing clarification
+#
+# The Socratic process continues for a configurable number of rounds,
+# with each iteration potentially revealing new insights or limitations.
+# The system emphasizes depth of analysis and intellectual honesty over definitive answers.
+
+# --- Model Configuration ---
+RESPONSE_MODEL="llama3:8b-instruct-q4_K_M"
+QUESTION_MODEL="phi3:3.8b-mini-4k-instruct-q4_K_M"
+
+# --- Defaults ---
+DEFAULT_ROUNDS=2
+
+# --- Argument Validation ---
+if [ "$#" -lt 1 ]; then
+    echo -e "\n\tSocratic"
+    echo -e "\tThis script uses the Socratic method to refine responses through AI-generated questions and dialogue."
+    echo -e "\n\tUsage: $0 [-f <file_path>] \"<your prompt>\" [number_of_questioning_rounds]"
+    echo -e "\n\tExample: $0 -f ./input.txt \"Please analyze this text\" 2"
+    echo -e "\n\tIf number_of_questioning_rounds is not provided, the program will default to $DEFAULT_ROUNDS rounds."
+    echo -e "\n\t-f <file_path> (optional): Append the contents of the file to the prompt."
+    echo -e "\n"
+    exit 1
+fi
+
+# --- Argument Parsing ---
+FILE_PATH=""
+while getopts "f:" opt; do
+  case $opt in
+    f)
+      FILE_PATH="$OPTARG"
+      ;;
+    *)
+      echo "Invalid option: -$OPTARG" >&2
+      exit 1
+      ;;
+  esac
+done
+shift $((OPTIND -1))
+
+PROMPT="$1"
+if [ -z "$2" ]; then
+    ROUNDS=$DEFAULT_ROUNDS
+else
+    ROUNDS=$2
+fi
+
+# If file path is provided, append its contents to the prompt
+if [ -n "$FILE_PATH" ]; then
+    if [ ! -f "$FILE_PATH" ]; then
+        echo "File not found: $FILE_PATH" >&2
+        exit 1
+    fi
+    FILE_CONTENTS=$(cat "$FILE_PATH")
+    PROMPT="$PROMPT\n[FILE CONTENTS]\n$FILE_CONTENTS\n[END FILE]"
+fi
+
+# --- File Initialization ---
+# Create a temporary directory if it doesn't exist
+mkdir -p ~/tmp
+# Create a unique file for this session based on the timestamp
+SESSION_FILE=~/tmp/socratic_$(date +%Y%m%d_%H%M%S).txt
+
+echo "Socratic Session Log: ${SESSION_FILE}"
+echo "---------------------------------"
+
+# Store the initial user prompt in the session file
+echo "USER PROMPT: ${PROMPT}" >> "${SESSION_FILE}"
+echo "" >> "${SESSION_FILE}"
+echo "Processing Socratic dialogue with ${ROUNDS} questioning rounds..."
+
+# --- Initial Response Generation ---
+echo "Generating initial response..."
+echo "INITIAL RESPONSE GENERATION:" >> "${SESSION_FILE}"
+echo "============================" >> "${SESSION_FILE}"
+
+INITIAL_PROMPT="You are an expert assistant. Please provide a comprehensive response to the following prompt. Be thorough but also honest about any limitations in your knowledge or areas where you cannot provide definitive answers.
+
+PROMPT: ${PROMPT}"
+
+INITIAL_RESPONSE=$(ollama run "${RESPONSE_MODEL}" "${INITIAL_PROMPT}")
+
+echo "INITIAL RESPONSE (${RESPONSE_MODEL}):" >> "${SESSION_FILE}"
+echo "${INITIAL_RESPONSE}" >> "${SESSION_FILE}"
+echo "" >> "${SESSION_FILE}"
+
+# --- Socratic Dialogue Rounds ---
+CURRENT_RESPONSE="${INITIAL_RESPONSE}"
+
+for round in $(seq 1 "${ROUNDS}"); do
+    echo "Starting Socratic round ${round} of ${ROUNDS}..."
+    echo "SOCRATIC ROUND ${round}:" >> "${SESSION_FILE}"
+    echo "=======================" >> "${SESSION_FILE}"
+    
+    # --- Step 1: Generate Socratic Questions ---
+    echo "Step 1: Generating Socratic questions..."
+    echo "STEP 1 - QUESTION GENERATION:" >> "${SESSION_FILE}"
+    
+    QUESTION_PROMPT="You are a Socratic questioner. Your task is to analyze the following response and generate 2-3 probing questions that will help clarify, refine, or explore the response more deeply.
+
+Focus on questions that:
+- Clarify assumptions or definitions
+- Explore implications or consequences
+- Consider alternative perspectives
+- Identify areas where the response may be incomplete or uncertain
+- Flag what cannot be determined with the given information
+
+RESPONSE TO QUESTION: ${CURRENT_RESPONSE}
+
+Generate your questions in a clear, numbered format. Be specific and avoid yes/no questions."
+
+    QUESTIONS=$(ollama run "${QUESTION_MODEL}" "${QUESTION_PROMPT}")
+    
+    echo "QUESTIONS (${QUESTION_MODEL}):" >> "${SESSION_FILE}"
+    echo "${QUESTIONS}" >> "${SESSION_FILE}"
+    echo "" >> "${SESSION_FILE}"
+    
+    # --- Step 2: Generate Refined Response ---
+    echo "Step 2: Generating refined response to questions..."
+    echo "STEP 2 - RESPONSE REFINEMENT:" >> "${SESSION_FILE}"
+    
+    REFINE_PROMPT="You are an expert assistant. Your previous response has been analyzed and the following Socratic questions have been raised. Please provide a refined, expanded, or clarified response that addresses these questions.
+
+ORIGINAL PROMPT: ${PROMPT}
+YOUR PREVIOUS RESPONSE: ${CURRENT_RESPONSE}
+SOCRATIC QUESTIONS: ${QUESTIONS}
+
+Please provide a comprehensive response that:
+- Addresses each question raised
+- Clarifies any assumptions or definitions
+- Explores implications and alternatives
+- Honestly acknowledges what cannot be determined
+- Refines or expands your original response based on the questioning"
+
+    REFINED_RESPONSE=$(ollama run "${RESPONSE_MODEL}" "${REFINE_PROMPT}")
+    
+    echo "REFINED RESPONSE (${RESPONSE_MODEL}):" >> "${SESSION_FILE}"
+    echo "${REFINED_RESPONSE}" >> "${SESSION_FILE}"
+    echo "" >> "${SESSION_FILE}"
+    
+    # Update the current response for the next round
+    CURRENT_RESPONSE="${REFINED_RESPONSE}"
+    
+    echo "Socratic round ${round} complete."
+    echo "" >> "${SESSION_FILE}"
+done
+
+# --- Final Summary Generation ---
+echo "Generating final summary..."
+echo "FINAL SUMMARY GENERATION:" >> "${SESSION_FILE}"
+echo "========================" >> "${SESSION_FILE}"
+
+SUMMARY_PROMPT="You are an expert analyst. Based on the Socratic dialogue below, please provide a concise summary of the key insights, conclusions, and limitations that emerged from the questioning process.
+
+ORIGINAL PROMPT: ${PROMPT}
+FINAL REFINED RESPONSE: ${CURRENT_RESPONSE}
+
+Please provide a summary that:
+- Highlights the most important insights discovered
+- Identifies key conclusions that can be drawn
+- Notes any limitations or areas that cannot be determined
+- Captures the evolution of understanding through the dialogue
+- Is clear, concise, and well-organized"
+
+FINAL_SUMMARY=$(ollama run "${RESPONSE_MODEL}" "${SUMMARY_PROMPT}")
+
+echo "FINAL SUMMARY (${RESPONSE_MODEL}):" >> "${SESSION_FILE}"
+echo "${FINAL_SUMMARY}" >> "${SESSION_FILE}"
+
+# --- Final Output ---
+echo "---------------------------------"
+echo "Socratic process complete."
+echo "Final summary:"
+echo "---------------------------------"
+
+echo "${FINAL_SUMMARY}"
+echo ""
+echo "Full Socratic dialogue log: ${SESSION_FILE}" 
\ No newline at end of file
diff --git a/bash/synthesis b/bash/synthesis
new file mode 100755
index 0000000..417279e
--- /dev/null
+++ b/bash/synthesis
@@ -0,0 +1,240 @@
#!/bin/bash

# Resolve the directory this script lives in, so sibling mechanism
# scripts and shared libraries can be found no matter where the
# caller's working directory is.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Synthesis System
# Combines the outputs of several thinking mechanisms into one coherent
# final response.
#
# The run proceeds in three phases:
#
#   PHASE 1 - MECHANISM EXECUTION
#     Every requested mechanism script is run against the same prompt,
#     collecting a diverse set of perspectives on the problem.
#
#   PHASE 2 - CONFLICT RESOLUTION
#     A model pass compares the mechanism outputs, surfacing
#     contradictions and disagreements and suggesting how they might
#     be reconciled, while staying honest about uncertainty.
#
#   PHASE 3 - SYNTHESIS GENERATION
#     A final model pass merges the best elements of every output,
#     guided by the conflict analysis, into a single unified answer
#     intended to be more comprehensive than any one mechanism alone.
#
# Rationale: multiple mechanisms cover more of the problem space;
# explicit conflict resolution keeps the merged answer logically
# consistent; and the synthesis step makes the integration transparent
# to the user.

# Pull in the shared logging/timing helpers (generate_session_id,
# start_timer, end_timer) via an absolute path.
source "${SCRIPT_DIR}/logging.sh"
+
# --- Model Configuration ---
# Model used for both the conflict-analysis pass and the final
# synthesis pass (phases 2 and 3).
SYNTHESIS_MODEL="llama3:8b-instruct-q4_K_M"

# --- Defaults ---
# Mechanisms run when the caller does not supply -m; each name must
# match an executable sibling script in this directory.
DEFAULT_MECHANISMS=("consensus" "critique" "socratic")

# --- Argument Validation ---
# At minimum a prompt is required; with no arguments print usage and
# exit non-zero.
if [ "$#" -lt 1 ]; then
    echo -e "\n\tSynthesis"
    echo -e "\tThis script combines outputs from multiple thinking mechanisms into a coherent final response."
    echo -e "\n\tUsage: $0 [-f <file_path>] [-m <mechanism1,mechanism2,...>] \"<your prompt>\" [number_of_rounds]"
    echo -e "\n\tExample: $0 -f ./input.txt -m consensus,critique \"Please analyze this text\" 2"
    echo -e "\n\tIf number_of_rounds is not provided, the program will default to 2 rounds."
    echo -e "\n\t-f <file_path> (optional): Append the contents of the file to the prompt."
    echo -e "\n\t-m <mechanisms> (optional): Comma-separated list of mechanisms to use (default: consensus,critique,socratic)."
    echo -e "\n"
    exit 1
fi
+
# --- Argument Parsing ---
#   -f <path>   append the contents of a file to the prompt
#   -m <list>   comma-separated mechanism names (default: consensus,critique,socratic)
FILE_PATH=""
MECHANISMS_STR=""
while getopts "f:m:" opt; do
  case $opt in
    f)
      FILE_PATH="$OPTARG"
      ;;
    m)
      MECHANISMS_STR="$OPTARG"
      ;;
    *)
      echo "Invalid option: -$OPTARG" >&2
      exit 1
      ;;
  esac
done
shift $((OPTIND -1))

PROMPT="$1"
# Second positional argument: rounds forwarded to each mechanism (default 2).
if [ -z "$2" ]; then
    ROUNDS=2
else
    ROUNDS="$2"
fi

# Robustness: rounds must be a positive integer; otherwise every
# mechanism script would be handed a nonsensical argument.
case "$ROUNDS" in
    ''|*[!0-9]*)
        echo "Invalid number of rounds: ${ROUNDS}" >&2
        exit 1
        ;;
esac

# Parse mechanisms
if [ -n "$MECHANISMS_STR" ]; then
    IFS=',' read -ra MECHANISMS <<< "$MECHANISMS_STR"
else
    MECHANISMS=("${DEFAULT_MECHANISMS[@]}")
fi

# If file path is provided, append its contents to the prompt
if [ -n "$FILE_PATH" ]; then
    if [ ! -f "$FILE_PATH" ]; then
        echo "File not found: $FILE_PATH" >&2
        exit 1
    fi
    FILE_CONTENTS=$(cat "$FILE_PATH")
    # Bug fix: inside a plain double-quoted string "\n" is a literal
    # backslash-n, not a newline, so the prompt markers ran together on
    # one line. $'\n' (ANSI-C quoting) inserts real newlines.
    PROMPT="${PROMPT}"$'\n'"[FILE CONTENTS]"$'\n'"${FILE_CONTENTS}"$'\n'"[END FILE]"
fi
+
# --- File Initialization ---
# One timestamped transcript per run, kept under ~/tmp.
mkdir -p "${HOME}/tmp"
SESSION_FILE="${HOME}/tmp/synthesis_$(date +%Y%m%d_%H%M%S).txt"

# Start the wall-clock timer for this run (helpers come from logging.sh).
SESSION_ID="$(generate_session_id)"
start_timer "${SESSION_ID}" "synthesis"

echo "Synthesis Session Log: ${SESSION_FILE}"
echo "---------------------------------"

# Record the prompt and the mechanism list at the top of the transcript.
{
  echo "USER PROMPT: ${PROMPT}"
  echo "MECHANISMS: ${MECHANISMS[*]}"
  echo ""
} >> "${SESSION_FILE}"
+
# --- Phase 1: Mechanism Execution ---
# Run every requested mechanism script against the same prompt and
# capture each one's output for the later analysis phases.
echo "Phase 1: Executing thinking mechanisms..."
echo "PHASE 1 - MECHANISM EXECUTION:" >> "${SESSION_FILE}"

declare -a mechanism_outputs
declare -a mechanism_names

for i in "${!MECHANISMS[@]}"; do
    mechanism="${MECHANISMS[$i]}"
    echo "  Executing ${mechanism} mechanism..."
    
    # Execute the sibling script by absolute path; 2>&1 folds its
    # diagnostics into the captured output so the log is complete.
    if [ -f "${SCRIPT_DIR}/${mechanism}" ]; then
        output=$("${SCRIPT_DIR}/${mechanism}" "${PROMPT}" "${ROUNDS}" 2>&1)
        mechanism_outputs[$i]="${output}"
        mechanism_names[$i]="${mechanism}"
        
        # Bug fix: ${i+1} is the "use alternate value" parameter
        # expansion — it expands to the literal 1 whenever i is set, so
        # every entry was logged as "MECHANISM 1". Arithmetic expansion
        # $((i + 1)) yields the intended 1-based label.
        echo "MECHANISM $((i + 1)) (${mechanism}):" >> "${SESSION_FILE}"
        echo "${output}" >> "${SESSION_FILE}"
        echo "" >> "${SESSION_FILE}"
    else
        echo "  WARNING: Mechanism ${mechanism} not found, skipping..." >&2
    fi
done
+
# --- Phase 2: Conflict Resolution ---
echo "Phase 2: Analyzing and resolving conflicts..."
echo "PHASE 2 - CONFLICT RESOLUTION:" >> "${SESSION_FILE}"

# Assemble the conflict-analysis prompt: instructions, the original
# prompt, then every mechanism's captured output.
CONFLICT_PROMPT="You are a conflict resolution specialist. Analyze the following outputs from different thinking mechanisms and identify any contradictions, conflicts, or areas of disagreement.

ORIGINAL PROMPT: ${PROMPT}

MECHANISM OUTPUTS:"

for idx in "${!MECHANISMS[@]}"; do
    # Skip mechanisms that produced no output (e.g. missing scripts).
    [ -n "${mechanism_outputs[$idx]}" ] || continue
    CONFLICT_PROMPT+="

${mechanism_names[$idx]} OUTPUT:
${mechanism_outputs[$idx]}"
done

CONFLICT_PROMPT+="

Please identify:
1. Any direct contradictions between the outputs
2. Areas where the mechanisms disagree
3. Information that appears to be conflicting
4. How these conflicts might be resolved

Provide a clear analysis of conflicts and potential resolutions."

conflict_analysis=$(ollama run "${SYNTHESIS_MODEL}" "${CONFLICT_PROMPT}")

{
  echo "CONFLICT ANALYSIS:"
  echo "${conflict_analysis}"
  echo ""
} >> "${SESSION_FILE}"
+
# --- Phase 3: Synthesis Generation ---
echo "Phase 3: Generating unified synthesis..."
echo "PHASE 3 - SYNTHESIS GENERATION:" >> "${SESSION_FILE}"

# Assemble the synthesis prompt: instructions, the original prompt,
# every mechanism's output, and the phase-2 conflict analysis.
SYNTHESIS_PROMPT="You are a synthesis specialist. Your task is to combine the outputs from multiple thinking mechanisms into a coherent, unified response that leverages the strengths of each approach.

ORIGINAL PROMPT: ${PROMPT}

MECHANISM OUTPUTS:"

for idx in "${!MECHANISMS[@]}"; do
    # Skip mechanisms that produced no output (e.g. missing scripts).
    [ -n "${mechanism_outputs[$idx]}" ] || continue
    SYNTHESIS_PROMPT+="

${mechanism_names[$idx]} OUTPUT:
${mechanism_outputs[$idx]}"
done

SYNTHESIS_PROMPT+="

CONFLICT ANALYSIS:
${conflict_analysis}

Please create a unified synthesis that:
1. Combines the best insights from each mechanism
2. Resolves any identified conflicts logically
3. Provides a comprehensive response that addresses all aspects
4. Maintains intellectual honesty about uncertainties
5. Creates a coherent narrative that flows naturally
6. Leverages the unique strengths of each thinking approach

Your synthesis should be greater than the sum of its parts - it should provide insights that individual mechanisms might miss."

final_synthesis=$(ollama run "${SYNTHESIS_MODEL}" "${SYNTHESIS_PROMPT}")

{
  echo "FINAL SYNTHESIS:"
  echo "${final_synthesis}"
} >> "${SESSION_FILE}"
+
+# End timing
+duration=$(end_timer "$SESSION_ID" "synthesis")
+
+# --- Final Output ---
+echo "---------------------------------"
+echo "Synthesis process complete."
+echo "Final unified response:"
+echo "---------------------------------"
+
+echo "${final_synthesis}"
+echo ""
+echo "Mechanisms used: ${MECHANISMS[*]}"
+echo "Execution time: ${duration} seconds"
+echo ""
+echo "Full synthesis log: ${SESSION_FILE}" 
\ No newline at end of file
diff --git a/bash/c-2-f b/bash/unit-conversion/c-2-f
index 597cb48..597cb48 100755
--- a/bash/c-2-f
+++ b/bash/unit-conversion/c-2-f
diff --git a/bash/f-2-c b/bash/unit-conversion/f-2-c
index b50df93..b50df93 100755
--- a/bash/f-2-c
+++ b/bash/unit-conversion/f-2-c