#!/bin/bash
# Computer Dispatch System
#
# Intelligently routes prompts to the most appropriate thinking mechanism,
# or directly to Ollama, based on complexity, question type, and user intent.
#
# PHASE 1 - PROMPT ANALYSIS:
#   Analyzes prompt complexity, length, and question type; identifies user
#   intent and keywords; determines if a direct Ollama response is
#   appropriate; classifies prompts into response categories.
#
# PHASE 2 - MECHANISM SELECTION:
#   Routes to a thinking mechanism based on the classification, using a
#   keyword decision tree for clear cases and falling back to direct
#   Ollama for simple ones.
#
# PHASE 3 - RESPONSE EXECUTION:
#   Executes the selected mechanism (or a direct Ollama call), stays
#   transparent about the routing decision, keeps a consistent output
#   format, and logs the decision process for analysis.
#
# The dispatch process emphasizes efficiency and appropriateness, balancing
# speed with depth based on prompt characteristics, so users get the best
# possible response for their specific needs.

# Absolute directory containing this script, so sibling files
# (logging.sh, classifier.sh, mechanism scripts) resolve regardless of
# the caller's working directory.
_this_script="${BASH_SOURCE[0]}"
SCRIPT_DIR="$(cd "$(dirname "$_this_script")" && pwd)"
# The system balances speed with depth based on prompt characteristics.

# --- Model Configuration ---
DEFAULT_MODEL="gemma3n:e2b"

# --- Defaults ---
DEFAULT_ROUNDS=2

# Print usage / option help for the script.
show_computer_help() {
  echo -e "\n\tComputer"
  echo -e "\tThis script intelligently routes prompts to the most appropriate thinking mechanism"
  echo -e "\tor directly to Ollama based on complexity, question type, and user intent."
  echo -e "\n\tUsage: $0 [options] \"\" [number_of_rounds]"
  echo -e "\n\tOptions:"
  echo -e "\t -f Append the contents of the file to the prompt"
  echo -e "\t -d Force direct Ollama response (bypass thinking mechanisms)"
  echo -e "\t -m Manually select thinking mechanism:"
  echo -e "\t direct, socratic, exploration, consensus, critique,"
  echo -e "\t synthesis, peer-review, puzzle"
  echo -e "\t -h, --help Show this help message"
  echo -e "\n\tExamples:"
  echo -e "\t $0 \"What is 2+2?\" # Auto-routing"
  echo -e "\t $0 -f document.txt \"Analyze this\" 3 # With file, 3 rounds"
  echo -e "\t $0 -d \"Simple question\" # Direct response only"
  echo -e "\t $0 -m puzzle \"Using Lil, how can I...\" # Force puzzle mechanism"
  echo -e "\n\tIf number_of_rounds is not provided, defaults to $DEFAULT_ROUNDS rounds."
  echo -e "\n"
}

# List the thinking mechanisms accepted by -m / shown by --mechanisms.
show_mechanisms() {
  echo -e "\n\tAvailable Thinking Mechanisms:"
  echo -e "\t direct - Simple questions, direct answers"
  echo -e "\t socratic - Deep questioning and analysis"
  echo -e "\t exploration - Multiple solution paths and comparison"
  echo -e "\t consensus - Multiple model agreement"
  echo -e "\t critique - Improvement suggestions and refinement"
  echo -e "\t synthesis - Combining and integrating approaches"
  echo -e "\t peer-review - Collaborative feedback and review"
  echo -e "\t puzzle - Coding problems and Lil programming"
  echo -e "\n"
}

# --- Argument Validation ---
# BUG FIX: this guard previously ran BEFORE show_computer_help was defined,
# so invoking the script with no arguments failed with "command not found"
# instead of printing the help text. Functions are now defined first.
if [ "$#" -lt 1 ]; then
  show_computer_help
  exit 1
fi

# --- Argument Parsing ---
FILE_PATH=""
FORCE_DIRECT=false
MANUAL_MECHANISM=""
# "-:" enables rudimentary long-option support (--help, --mechanisms).
while getopts "f:dm:h-:" opt; do
  case $opt in
    f) FILE_PATH="$OPTARG" ;;
    d) FORCE_DIRECT=true ;;
    m) MANUAL_MECHANISM="$OPTARG" ;;
    h)
      show_computer_help
      exit 0
      ;;
    -)
      case "${OPTARG}" in
        help)
          show_computer_help
          exit 0
          ;;
        mechanisms)
          show_mechanisms
          exit 0
          ;;
        *)
          echo "Invalid option: --${OPTARG}" >&2
          exit 1
          ;;
      esac
      ;;
    *)
      echo "Invalid option: -$OPTARG" >&2
      show_computer_help
      exit 1
      ;;
  esac
done
shift $((OPTIND - 1))

PROMPT="$1"
# Default the round count when the second positional is absent/empty.
ROUNDS="${2:-$DEFAULT_ROUNDS}"

# Store original inputs for validation after sourcing (sourced files may
# define/override validation helpers).
ORIGINAL_PROMPT="$PROMPT"
ORIGINAL_FILE_PATH="$FILE_PATH"
ORIGINAL_ROUNDS="$ROUNDS"

# Source the logging/validation system using an absolute path.
source "${SCRIPT_DIR}/logging.sh"

# Ensure validation functions are available after sourcing.
if ! command -v validate_prompt >/dev/null 2>&1; then
  echo "Error: Validation functions not loaded properly" >&2
  exit 1
fi

# Validate and set default model with fallback.
# (if ! VAR=$(...) checks the command's status directly instead of $?.)
if ! DEFAULT_MODEL=$(validate_model "$DEFAULT_MODEL" "llama3:8b-instruct-q4_K_M"); then
  log_error "No valid default model available"
  exit 1
fi

# Validate prompt (validate_prompt may normalize it on stdout).
if ! PROMPT=$(validate_prompt "$ORIGINAL_PROMPT"); then
  exit 1
fi

# Validate file path if provided, then append the file to the prompt.
if [ -n "$ORIGINAL_FILE_PATH" ]; then
  if ! validate_file_path "$ORIGINAL_FILE_PATH"; then
    exit 1
  fi
  FILE_CONTENTS=$(cat "$ORIGINAL_FILE_PATH")
  # BUG FIX: the original used "\n" inside double quotes, which embeds a
  # literal backslash-n rather than a newline; $'\n' produces real newlines.
  PROMPT="$PROMPT"$'\n'"[FILE CONTENTS]"$'\n'"$FILE_CONTENTS"$'\n'"[END FILE]"
fi

# Validate rounds: positive integer, capped at 5.
if ! [[ "$ORIGINAL_ROUNDS" =~ ^[1-9][0-9]*$ ]] || [ "$ORIGINAL_ROUNDS" -gt 5 ]; then
  log_error "Invalid number of rounds: $ORIGINAL_ROUNDS (must be 1-5)"
  exit 1
fi

# --- File Initialization ---
# Create a temporary directory if it doesn't exist.
mkdir -p ~/tmp

# Create a unique file for this session based on the timestamp.
SESSION_FILE=~/tmp/computer_$(date +%Y%m%d_%H%M%S).txt

# Initialize timing (helpers provided by logging.sh).
SESSION_ID=$(generate_session_id)
start_timer "$SESSION_ID" "computer"

echo "Computer Dispatch Session Log: ${SESSION_FILE}"
echo "---------------------------------"

# Store the initial user prompt in the session file.
{
  echo "USER PROMPT: ${PROMPT}"
  echo "FORCE DIRECT: ${FORCE_DIRECT}"
  echo ""
} >> "${SESSION_FILE}"

#######################################
# Classify a prompt and pick a mechanism.
# Arguments:
#   $1 - the prompt text
#   $2 - "true" (default) to try the advanced classifier first
# Outputs:
#   "MECHANISM:CONFIDENCE" on stdout, e.g. "DIRECT:0.8"
#######################################
analyze_prompt() {
  local prompt="$1"
  local use_advanced="${2:-true}"

  # Check for direct Ollama requests (explicit user intent).
  if [[ "$prompt" =~ (direct|simple|quick|fast|straight) ]]; then
    echo "DIRECT:1.0"
    return
  fi

  # Use advanced classification if available.
  if [ "$use_advanced" = "true" ] && [ -f "${SCRIPT_DIR}/classifier.sh" ]; then
    source "${SCRIPT_DIR}/classifier.sh"
    # Declared separately so the assignment does not mask the exit status.
    local result
    result=$(classify_prompt "$prompt" true)
    if [[ "$result" =~ ^[A-Z_]+:[0-9.]+$ ]]; then
      echo "$result"
      return
    else
      log_warning "Advanced classifier failed, falling back to simple classification"
    fi
  fi

  # Fallback to simple classification.
  local analysis=""
  local confidence="0.6"

  # Check prompt length (simple heuristic for complexity).
  local word_count
  word_count=$(echo "$prompt" | wc -w)

  # Very short prompts (likely simple questions).
  if [ "$word_count" -le 5 ]; then
    echo "DIRECT:0.8"
    return
  fi

  # Keyword-based classification with priority order.
  if [[ "$prompt" =~ (consensus|agree|disagree|vote|multiple.*perspectives|multiple.*opinions) ]]; then
    analysis="CONSENSUS"
  elif [[ "$prompt" =~ (synthesize|combine|integrate|unify|merge|consolidate) ]]; then
    analysis="SYNTHESIS"
  elif [[ "$prompt" =~ (explore.*paths|explore.*alternatives|compare.*strategies|compare.*approaches|what.*options) ]]; then
    analysis="EXPLORATION"
  elif [[ "$prompt" =~ (improve|refine|edit|revise|better|enhance|polish|fix|optimize) ]]; then
    analysis="CRITIQUE"
  elif [[ "$prompt" =~ (review|feedback|peer.*review|collaborate|suggest|advice) ]]; then
    analysis="PEER_REVIEW"
  elif [[ "$prompt" =~ (analyze|examine|investigate|deep.*dive|thorough.*analysis|comprehensive) ]]; then
    analysis="SOCRATIC"
  elif [[ "$prompt" =~ (explore|alternatives|options|compare|strategies|approaches) ]]; then
    analysis="EXPLORATION"
    confidence="0.5" # Lower confidence due to ambiguous keywords
  else
    # Default to direct for unclear cases.
    analysis="DIRECT"
    confidence="0.4"
  fi

  echo "$analysis:$confidence"
}

# --- Mechanism Selection ---
echo "Analyzing prompt and selecting mechanism..."
echo "PROMPT ANALYSIS:" >> "${SESSION_FILE}"

if [ "$FORCE_DIRECT" = true ]; then
  MECHANISM="DIRECT"
  CONFIDENCE="1.0"
  REASON="User requested direct response with -d flag"
else
  # Check for manual mechanism selection.
  if [ -n "$MANUAL_MECHANISM" ]; then
    # Validate manual mechanism selection; manual picks are full-confidence.
    CONFIDENCE="1.0"
    case "$MANUAL_MECHANISM" in
      direct|DIRECT)
        MECHANISM="DIRECT"
        REASON="User manually selected direct mechanism"
        ;;
      socratic|SOCRATIC)
        MECHANISM="SOCRATIC"
        REASON="User manually selected socratic mechanism"
        ;;
      exploration|EXPLORATION)
        MECHANISM="EXPLORATION"
        REASON="User manually selected exploration mechanism"
        ;;
      consensus|CONSENSUS)
        MECHANISM="CONSENSUS"
        REASON="User manually selected consensus mechanism"
        ;;
      critique|CRITIQUE)
        MECHANISM="CRITIQUE"
        REASON="User manually selected critique mechanism"
        ;;
      synthesis|SYNTHESIS)
        MECHANISM="SYNTHESIS"
        REASON="User manually selected synthesis mechanism"
        ;;
      peer-review|peer_review|PEER_REVIEW|PEER-REVIEW)
        MECHANISM="PEER_REVIEW"
        REASON="User manually selected peer-review mechanism"
        ;;
      puzzle|PUZZLE)
        MECHANISM="PUZZLE"
        REASON="User manually selected puzzle mechanism"
        ;;
      *)
        echo "Error: Invalid mechanism '$MANUAL_MECHANISM'" >&2
        echo "Use --mechanisms to see available options." >&2
        exit 1
        ;;
    esac
  else
    ANALYSIS_RESULT=$(analyze_prompt "$PROMPT")
    MECHANISM=$(echo "$ANALYSIS_RESULT" | cut -d':' -f1)
    CONFIDENCE=$(echo "$ANALYSIS_RESULT" | cut -d':' -f2)

    # Validate confidence score.
    if [[ ! "$CONFIDENCE" =~ ^[0-9.]+$ ]]; then
      CONFIDENCE="0.5"
      log_warning "Invalid confidence score, defaulting to 0.5"
    fi
  fi

  case "$MECHANISM" in
    "DIRECT") REASON="Simple prompt or direct request (confidence: $CONFIDENCE)" ;;
    "CONSENSUS") REASON="Multiple perspectives or consensus needed (confidence: $CONFIDENCE)" ;;
    "SYNTHESIS") REASON="Integration of multiple approaches needed (confidence: $CONFIDENCE)" ;;
    "EXPLORATION") REASON="Systematic exploration of alternatives needed (confidence: $CONFIDENCE)" ;;
    "SOCRATIC") REASON="Deep analysis or exploration required (confidence: $CONFIDENCE)" ;;
    "CRITIQUE") REASON="Improvement or refinement requested (confidence: $CONFIDENCE)" ;;
    "PEER_REVIEW") REASON="Collaborative review or feedback needed (confidence: $CONFIDENCE)" ;;
    "PUZZLE") REASON="Puzzle solving or coding challenge (confidence: $CONFIDENCE)" ;;
    *)
      REASON="Default fallback (confidence: $CONFIDENCE)"
      MECHANISM="DIRECT"
      ;;
  esac

  # Low confidence warning (bc may be absent; fallback "0" skips the warning).
  if (( $(echo "$CONFIDENCE < 0.6" | bc -l 2>/dev/null || echo "0") )); then
    log_warning "Low classification confidence ($CONFIDENCE) for prompt: $PROMPT"
    echo "Note: Classification confidence is low ($CONFIDENCE). Consider using -d for direct response." >&2
  fi
fi

echo "Selected mechanism: ${MECHANISM}" >> "${SESSION_FILE}"
echo "Reason: ${REASON}" >> "${SESSION_FILE}"
echo "" >> "${SESSION_FILE}"
echo "Selected mechanism: ${MECHANISM}"
echo "Reason: ${REASON}"
echo "---------------------------------"

#######################################
# Delegate the prompt to a mechanism script, teeing output to the log.
# Globals:   SCRIPT_DIR, SESSION_FILE, PROMPT, ROUNDS (read)
# Arguments:
#   $1 - display name for the console message
#   $2 - uppercase label for the session log
#   $3 - mechanism script filename under SCRIPT_DIR
#######################################
run_mechanism() {
  local display="$1" label="$2" script="$3"
  echo "Delegating to ${display} mechanism..."
  echo "DELEGATING TO ${label}:" >> "${SESSION_FILE}"
  # Execute the mechanism script and display output directly.
  "${SCRIPT_DIR}/${script}" "${PROMPT}" "${ROUNDS}" 2>&1 | tee -a "${SESSION_FILE}"
}

# --- Response Execution ---
echo "Executing selected mechanism..."
echo "RESPONSE EXECUTION:" >> "${SESSION_FILE}"

case "$MECHANISM" in
  "DIRECT")
    echo "Using direct Ollama response..."
    echo "DIRECT OLLAMA RESPONSE:" >> "${SESSION_FILE}"
    DIRECT_PROMPT="You are an expert assistant. You always flag if you don't know something. Please provide a clear, helpful response to the following prompt: ${PROMPT}"
    RESPONSE=$(ollama run "${DEFAULT_MODEL}" "${DIRECT_PROMPT}")
    echo "${RESPONSE}" >> "${SESSION_FILE}"
    echo "" >> "${SESSION_FILE}"
    echo "---------------------------------"
    echo "Direct response:"
    echo "---------------------------------"
    echo "${RESPONSE}"
    ;;
  "CONSENSUS")   run_mechanism "consensus" "CONSENSUS" "consensus" ;;
  "SOCRATIC")    run_mechanism "Socratic" "SOCRATIC" "socratic" ;;
  "CRITIQUE")    run_mechanism "critique" "CRITIQUE" "critique" ;;
  "PEER_REVIEW") run_mechanism "peer-review" "PEER_REVIEW" "peer-review" ;;
  "SYNTHESIS")   run_mechanism "synthesis" "SYNTHESIS" "synthesis" ;;
  "EXPLORATION") run_mechanism "exploration" "EXPLORATION" "exploration" ;;
  "PUZZLE")      run_mechanism "puzzle" "PUZZLE" "puzzle" ;;
esac

# --- Final Summary ---
{
  echo ""
  echo "DISPATCH SUMMARY:"
  echo "================"
  echo "Original Prompt: ${PROMPT}"
  echo "Selected Mechanism: ${MECHANISM}"
  echo "Reason: ${REASON}"
  echo "Rounds: ${ROUNDS}"
} >> "${SESSION_FILE}"

# End timing.
duration=$(end_timer "$SESSION_ID" "computer")
echo ""
echo "Execution time: ${duration} seconds"
# BUG FIX: this line was printed twice in the original.
echo "Full dispatch log: ${SESSION_FILE}"