Diffstat (limited to 'bash/talk-to-computer/config.sh')
-rwxr-xr-x  bash/talk-to-computer/config.sh  126
1 file changed, 126 insertions(+), 0 deletions(-)
diff --git a/bash/talk-to-computer/config.sh b/bash/talk-to-computer/config.sh
new file mode 100755
index 0000000..ec612cc
--- /dev/null
+++ b/bash/talk-to-computer/config.sh
@@ -0,0 +1,126 @@
+#!/bin/bash
+
+# Centralized Configuration File
+# This file contains all model configurations, defaults, and system settings
+# for the AI thinking mechanisms system.
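+#
+# This file is intended to be sourced by the other scripts in this
+# directory (assumed caller pattern; adjust the path if the layout differs):
+#   source "$(dirname "${BASH_SOURCE[0]}")/config.sh"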
+
+# --- Default Models ---
+
+# Main dispatcher model
+DEFAULT_MODEL="gemma3n:e2b"
+
+# Classification model
+CLASSIFIER_MODEL="gemma3n:e2b"
+
+# --- Thinking Mechanism Models ---
+
+# Exploration mechanism
+EXPLORATION_MODEL="llama3:8b-instruct-q4_K_M"
+ANALYSIS_MODEL="phi3:3.8b-mini-4k-instruct-q4_K_M"
+
+# Consensus mechanism
+CONSENSUS_MODELS=(
+    "llama3:8b-instruct-q4_K_M"
+    "phi3:3.8b-mini-4k-instruct-q4_K_M"
+    "deepseek-r1:1.5b"
+    "gemma3n:e2b"
+    "dolphin3:latest"
+)
+CONSENSUS_JUDGE_MODEL="gemma3n:e2b"
+
+# Socratic mechanism
+SOCRATIC_RESPONSE_MODEL="llama3:8b-instruct-q4_K_M"
+SOCRATIC_QUESTION_MODEL="phi3:3.8b-mini-4k-instruct-q4_K_M"
+
+# Critique mechanism
+CRITIQUE_MODEL="llama3:8b-instruct-q4_K_M"
+
+# Synthesis mechanism
+SYNTHESIS_MODEL="llama3:8b-instruct-q4_K_M"
+
+# Peer Review mechanism
+PEER_REVIEW_MODEL="llama3:8b-instruct-q4_K_M"
+
+# Puzzle mechanism
+PUZZLE_MODEL="llama3:8b-instruct-q4_K_M"
+PUZZLE_ANALYSIS_MODEL="phi3:3.8b-mini-4k-instruct-q4_K_M"
+
+# --- System Settings ---
+
+# Default values
+DEFAULT_ROUNDS=2
+DEFAULT_LANGUAGE="lil"
+
+# Quality Guard settings
+MIN_RESPONSE_LENGTH=30
+MAX_REPETITION_RATIO=0.4
+MAX_NONSENSE_SCORE=0.6
+DEGRADATION_THRESHOLD=0.65
+MAX_CORRECTION_ATTEMPTS=2
+FALLBACK_ENABLED=true
+
+# Logging settings
+LOG_DIR=~/tmp/ai_thinking
+SESSION_LOG="${LOG_DIR}/session_$(date +%Y%m%d_%H%M%S).json"
+ERROR_LOG="${LOG_DIR}/errors.log"
+METRICS_FILE="${LOG_DIR}/performance_metrics.json"
+CLASSIFICATION_LOG="${LOG_DIR}/classification.log"
+
+# Security settings
+MAX_PROMPT_LENGTH=10000
+
+# --- Model Fallbacks ---
+
+# Fallback model for any model that fails validation
+FALLBACK_MODEL="gemma3n:e2b"
+
+# --- Environment Variable Support ---
+
+# Allow overriding models via environment variables
+if [ -n "$AI_DEFAULT_MODEL" ]; then
+    DEFAULT_MODEL="$AI_DEFAULT_MODEL"
+fi
+
+if [ -n "$AI_CLASSIFIER_MODEL" ]; then
+    CLASSIFIER_MODEL="$AI_CLASSIFIER_MODEL"
+fi
+
+if [ -n "$AI_EXPLORATION_MODEL" ]; then
+    EXPLORATION_MODEL="$AI_EXPLORATION_MODEL"
+fi
+
+if [ -n "$AI_ANALYSIS_MODEL" ]; then
+    ANALYSIS_MODEL="$AI_ANALYSIS_MODEL"
+fi
+
+if [ -n "$AI_PUZZLE_MODEL" ]; then
+    PUZZLE_MODEL="$AI_PUZZLE_MODEL"
+fi
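+
+# Overrides can also be supplied per invocation, for example (the entry-point
+# script name here is hypothetical and not defined in this file):
+#   AI_DEFAULT_MODEL="llama3:8b-instruct-q4_K_M" ./dispatcher.sh "your prompt"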
+
+# --- Utility Functions ---
+
+# Get a model with fallback support
+get_model_with_fallback() {
+    local primary_model="$1"
+    local fallback_model="$2"
+
+    if [ -n "$primary_model" ]; then
+        echo "$primary_model"
+    else
+        echo "$fallback_model"
+    fi
+}
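+
+# Example (assumed caller pattern):
+#   MODEL=$(get_model_with_fallback "$EXPLORATION_MODEL" "$FALLBACK_MODEL")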
+
+# Validate if a model is available
+is_model_available() {
+    local model="$1"
+    # Fixed-string match so dots in model names are matched literally
+    ollama list 2>/dev/null | grep -qF -- "$model"
+}
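+
+# Example (assumed caller pattern): fall back when the primary model is
+# missing from the local Ollama install:
+#   if ! is_model_available "$EXPLORATION_MODEL"; then
+#       EXPLORATION_MODEL="$FALLBACK_MODEL"
+#   fi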
+
+# Export all configuration variables for child processes
+# (note: bash cannot export arrays, so CONSENSUS_MODELS is only visible to
+# scripts that source this file rather than through the environment)
+export DEFAULT_MODEL CLASSIFIER_MODEL EXPLORATION_MODEL ANALYSIS_MODEL
+export CONSENSUS_MODELS CONSENSUS_JUDGE_MODEL SOCRATIC_RESPONSE_MODEL SOCRATIC_QUESTION_MODEL
+export CRITIQUE_MODEL SYNTHESIS_MODEL PEER_REVIEW_MODEL PUZZLE_MODEL PUZZLE_ANALYSIS_MODEL
+export DEFAULT_ROUNDS DEFAULT_LANGUAGE MIN_RESPONSE_LENGTH MAX_REPETITION_RATIO
+export MAX_NONSENSE_SCORE DEGRADATION_THRESHOLD MAX_CORRECTION_ATTEMPTS FALLBACK_ENABLED
+export LOG_DIR SESSION_LOG ERROR_LOG METRICS_FILE CLASSIFICATION_LOG MAX_PROMPT_LENGTH FALLBACK_MODEL