#!/bin/bash
# Centralized Configuration File
# This file contains all model configurations, defaults, and system settings
# for the AI thinking mechanisms system.
# --- Default Models ---
# Main dispatcher model
DEFAULT_MODEL="gemma3n:e2b"
# Classification model
CLASSIFIER_MODEL="gemma3n:e2b"
# --- Thinking Mechanism Models ---
# Exploration mechanism
EXPLORATION_MODEL="llama3:8b-instruct-q4_K_M"
# Analysis mechanism
ANALYSIS_MODEL="phi3:3.8b-mini-4k-instruct-q4_K_M"
# Consensus mechanism
CONSENSUS_MODELS=(
    "llama3:8b-instruct-q4_K_M"
    "phi3:3.8b-mini-4k-instruct-q4_K_M"
    "deepseek-r1:1.5b"
    "gemma3n:e2b"
    "dolphin3:latest"
)
CONSENSUS_JUDGE_MODEL="gemma3n:e2b"
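# Example (sketch): a consensus mechanism script that sources this file could
# iterate over the model list and then let the judge model pick a winner.
# run_model is a hypothetical helper in the calling script, not defined here.
#
#   for model in "${CONSENSUS_MODELS[@]}"; do
#       run_model "$model" "$prompt"
#   done
#   # ...then ask "$CONSENSUS_JUDGE_MODEL" to select the best response.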
# Socratic mechanism
SOCRATIC_RESPONSE_MODEL="llama3:8b-instruct-q4_K_M"
SOCRATIC_QUESTION_MODEL="phi3:3.8b-mini-4k-instruct-q4_K_M"
# Critique mechanism
CRITIQUE_MODEL="llama3:8b-instruct-q4_K_M"
# Synthesis mechanism
SYNTHESIS_MODEL="llama3:8b-instruct-q4_K_M"
# Peer Review mechanism
PEER_REVIEW_MODEL="llama3:8b-instruct-q4_K_M"
# Puzzle mechanism
PUZZLE_MODEL="llama3:8b-instruct-q4_K_M"
PUZZLE_ANALYSIS_MODEL="phi3:3.8b-mini-4k-instruct-q4_K_M"
# --- System Settings ---
# Default values
DEFAULT_ROUNDS=2
DEFAULT_LANGUAGE="lil"
# Quality Guard settings
MIN_RESPONSE_LENGTH=30
MAX_REPETITION_RATIO=0.4
MAX_NONSENSE_SCORE=0.6
DEGRADATION_THRESHOLD=0.65
MAX_CORRECTION_ATTEMPTS=2
FALLBACK_ENABLED=true
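# Example (sketch): a quality-guard check in a consumer script might compare a
# generated response against these thresholds; "$response" is hypothetical.
#
#   if [ "${#response}" -lt "$MIN_RESPONSE_LENGTH" ]; then
#       echo "Response too short, retrying (max $MAX_CORRECTION_ATTEMPTS attempts)" >&2
#   fi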
# Logging settings
LOG_DIR="$HOME/tmp/ai_thinking"
# Create the log directory up front so the log files below can be written
mkdir -p "$LOG_DIR"
SESSION_LOG="${LOG_DIR}/session_$(date +%Y%m%d_%H%M%S).json"
ERROR_LOG="${LOG_DIR}/errors.log"
METRICS_FILE="${LOG_DIR}/performance_metrics.json"
CLASSIFICATION_LOG="${LOG_DIR}/classification.log"
# Security settings
MAX_PROMPT_LENGTH=10000
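# Example (sketch): a dispatcher could enforce the prompt limit before calling
# a model; "$prompt" is a hypothetical variable in the calling script.
#
#   if [ "${#prompt}" -gt "$MAX_PROMPT_LENGTH" ]; then
#       echo "Prompt exceeds ${MAX_PROMPT_LENGTH} characters" >&2
#       exit 1
#   fi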
# --- Model Fallbacks ---
# Fallback model for any model that fails validation
FALLBACK_MODEL="gemma3n:e2b"
# --- Environment Variable Support ---
# Allow overriding models via environment variables
if [ -n "$AI_DEFAULT_MODEL" ]; then
DEFAULT_MODEL="$AI_DEFAULT_MODEL"
fi
if [ -n "$AI_CLASSIFIER_MODEL" ]; then
CLASSIFIER_MODEL="$AI_CLASSIFIER_MODEL"
fi
if [ -n "$AI_EXPLORATION_MODEL" ]; then
EXPLORATION_MODEL="$AI_EXPLORATION_MODEL"
fi
if [ -n "$AI_ANALYSIS_MODEL" ]; then
ANALYSIS_MODEL="$AI_ANALYSIS_MODEL"
fi
if [ -n "$AI_PUZZLE_MODEL" ]; then
PUZZLE_MODEL="$AI_PUZZLE_MODEL"
fi
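# Example (sketch): assuming a dispatcher script named ai_think.sh sources this
# file, a model can be swapped for a single run without editing the config:
#
#   AI_DEFAULT_MODEL="llama3:8b-instruct-q4_K_M" ./ai_think.sh "your prompt"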
# --- Utility Functions ---
# Get a model with fallback support
get_model_with_fallback() {
    local primary_model="$1"
    local fallback_model="$2"
    if [ -n "$primary_model" ]; then
        echo "$primary_model"
    else
        echo "$fallback_model"
    fi
}
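# Example usage (sketch):
#
#   model=$(get_model_with_fallback "$EXPLORATION_MODEL" "$FALLBACK_MODEL")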
# Validate if a model is available
is_model_available() {
    local model="$1"
    # Match the model name exactly against the first column of `ollama list`
    # to avoid false positives from partial matches
    ollama list 2>/dev/null | awk '{print $1}' | grep -qx "$model"
}
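# Example usage (sketch): fall back to FALLBACK_MODEL when a configured model
# is not installed locally.
#
#   if ! is_model_available "$CRITIQUE_MODEL"; then
#       CRITIQUE_MODEL="$FALLBACK_MODEL"
#   fi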
# Export all configuration variables for child processes.
# Note: bash cannot export arrays, so CONSENSUS_MODELS is only visible to
# scripts that source this file directly.
export DEFAULT_MODEL CLASSIFIER_MODEL EXPLORATION_MODEL ANALYSIS_MODEL
export CONSENSUS_JUDGE_MODEL SOCRATIC_RESPONSE_MODEL SOCRATIC_QUESTION_MODEL
export CRITIQUE_MODEL SYNTHESIS_MODEL PEER_REVIEW_MODEL PUZZLE_MODEL PUZZLE_ANALYSIS_MODEL
export DEFAULT_ROUNDS DEFAULT_LANGUAGE MIN_RESPONSE_LENGTH MAX_REPETITION_RATIO
export MAX_NONSENSE_SCORE DEGRADATION_THRESHOLD MAX_CORRECTION_ATTEMPTS FALLBACK_ENABLED
export LOG_DIR SESSION_LOG ERROR_LOG METRICS_FILE CLASSIFICATION_LOG MAX_PROMPT_LENGTH FALLBACK_MODEL
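# Example (sketch): a mechanism script can load this configuration by sourcing
# it; the relative path and file name below are assumptions.
#
#   source "$(dirname "$0")/config.sh"
#   echo "Dispatcher model: $DEFAULT_MODEL"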