Diffstat (limited to 'bash/talk-to-computer/test_framework.sh')
-rwxr-xr-x  bash/talk-to-computer/test_framework.sh  434
1 file changed, 434 insertions(+), 0 deletions(-)
diff --git a/bash/talk-to-computer/test_framework.sh b/bash/talk-to-computer/test_framework.sh
new file mode 100755
index 0000000..c74ad56
--- /dev/null
+++ b/bash/talk-to-computer/test_framework.sh
@@ -0,0 +1,434 @@
+#!/bin/bash
+
+# Comprehensive Test Framework for AI Thinking Mechanisms
+# This script provides automated testing capabilities for all system components.
+
+# Source common functionality
+source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
+source "$(dirname "${BASH_SOURCE[0]}")/config.sh"
+
+# --- Test Configuration ---
+
+# Test directories
+TEST_DIR="${LOG_DIR}/tests"
+RESULTS_DIR="${TEST_DIR}/results"
+COVERAGE_DIR="${TEST_DIR}/coverage"
+
+# Test counters
+TESTS_PASSED=0
+TESTS_FAILED=0
+TESTS_SKIPPED=0
+
+# --- Test Utilities ---
+
+# Initialize test framework
+init_test_framework() {
+    mkdir -p "$RESULTS_DIR" "$COVERAGE_DIR"
+    echo "๐Ÿงช AI Thinking Mechanisms Test Framework"
+    echo "========================================"
+    echo
+}
+
+# Test result functions
+test_pass() {
+    local test_name="$1"
+    echo "โœ… PASS: $test_name"
+    ((TESTS_PASSED++))
+}
+
+test_fail() {
+    local test_name="$1"
+    local reason="$2"
+    echo "โŒ FAIL: $test_name - $reason"
+    ((TESTS_FAILED++))
+}
+
+test_skip() {
+    local test_name="$1"
+    local reason="$2"
+    echo "โญ๏ธ  SKIP: $test_name - $reason"
+    ((TESTS_SKIPPED++))
+}
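+
+# Example output from the helpers above (test names taken from the suite below):
+#   ✅ PASS: config_default_model
+#   ❌ FAIL: error_logging - Error log file not created
+#   ⏭️  SKIP: validate_model_success - Ollama not available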
+
+# Assert functions
+assert_equals() {
+    local expected="$1"
+    local actual="$2"
+    local test_name="$3"
+
+    if [ "$expected" = "$actual" ]; then
+        test_pass "$test_name"
+    else
+        test_fail "$test_name" "Expected '$expected', got '$actual'"
+    fi
+}
+
+assert_not_empty() {
+    local value="$1"
+    local test_name="$2"
+
+    if [ -n "$value" ]; then
+        test_pass "$test_name"
+    else
+        test_fail "$test_name" "Value is empty"
+    fi
+}
+
+assert_file_exists() {
+    local file_path="$1"
+    local test_name="$2"
+
+    if [ -f "$file_path" ]; then
+        test_pass "$test_name"
+    else
+        test_fail "$test_name" "File does not exist: $file_path"
+    fi
+}
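+
+# Usage sketch (illustrative only, not wired into the suite): a hypothetical
+# test composed from the asserts above.
+#
+#   test_example() {
+#       assert_equals "42" "$((6 * 7))" "arithmetic_sanity"
+#       assert_not_empty "$HOME" "home_is_set"
+#       assert_file_exists "/etc/hosts" "hosts_file_present"
+#   }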
+
+# --- Component Tests ---
+
+test_common_functions() {
+    echo "Testing Common Functions..."
+
+    # Test script directory detection
+    local script_dir
+    script_dir=$(get_script_dir)
+    assert_not_empty "$script_dir" "get_script_dir"
+
+    # Test model validation (if ollama is available)
+    if command_exists ollama; then
+        # Check the exit status directly; the command's output is not needed.
+        if validate_model "gemma3n:e2b" "gemma3n:e2b" >/dev/null 2>&1; then
+            test_pass "validate_model_success"
+        else
+            test_skip "validate_model_success" "Model not available"
+        fi
+    else
+        test_skip "validate_model_success" "Ollama not available"
+    fi
+}
+
+test_config_loading() {
+    echo "Testing Configuration Loading..."
+
+    # Test that config variables are loaded
+    if [ -n "$DEFAULT_MODEL" ]; then
+        test_pass "config_default_model"
+    else
+        test_fail "config_default_model" "DEFAULT_MODEL not set"
+    fi
+
+    if [ -n "$FALLBACK_MODEL" ]; then
+        test_pass "config_fallback_model"
+    else
+        test_fail "config_fallback_model" "FALLBACK_MODEL not set"
+    fi
+
+    # Test model arrays
+    if [ ${#CONSENSUS_MODELS[@]} -gt 0 ]; then
+        test_pass "config_consensus_models"
+    else
+        test_fail "config_consensus_models" "CONSENSUS_MODELS array is empty"
+    fi
+}
+
+test_quality_guard() {
+    echo "Testing Quality Guard..."
+
+    source "./quality_guard.sh"
+
+    # Test quality assessment
+    local test_response="This is a comprehensive answer that should pass quality checks."
+    local quality_score
+    quality_score=$(assess_quality "$test_response" "test prompt" "socratic")
+    assert_not_empty "$quality_score" "assess_quality"
+
+    # Test degradation detection
+    local degradation_score
+    degradation_score=$(detect_degradation_patterns "$test_response")
+    assert_not_empty "$degradation_score" "detect_degradation_patterns"
+
+    # Test degraded response detection
+    local lorem_response="Lorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod tempor incididunt"
+    local lorem_degradation
+    lorem_degradation=$(detect_degradation_patterns "$lorem_response")
+
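+    # bc performs the float comparison; if bc is unavailable, the "echo 0"
+    # fallback makes this check fail rather than abort the script.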
+    if (( $(echo "$lorem_degradation > 0" | bc -l 2>/dev/null || echo "0") )); then
+        test_pass "lorem_ipsum_detection"
+    else
+        test_fail "lorem_ipsum_detection" "Failed to detect lorem ipsum pattern"
+    fi
+}
+
+test_logging_system() {
+    echo "Testing Logging System..."
+
+    source "./logging.sh"
+
+    # Test error logging
+    log_error "Test error message"
+    if [ -f "$ERROR_LOG" ]; then
+        test_pass "error_logging"
+    else
+        test_fail "error_logging" "Error log file not created"
+    fi
+
+    # Test validation functions
+    local temp_file
+    temp_file=$(create_managed_temp_file "test" "tmp")
+    echo "test content" > "$temp_file"
+
+    if validate_file_path "$temp_file"; then
+        test_pass "validate_file_path"
+    else
+        test_fail "validate_file_path" "Failed to validate existing file"
+    fi
+
+    # Test invalid file
+    if ! validate_file_path "/nonexistent/file.txt" 2>/dev/null; then
+        test_pass "validate_invalid_file"
+    else
+        test_fail "validate_invalid_file" "Should have failed for nonexistent file"
+    fi
+}
+
+test_resource_management() {
+    echo "Testing Resource Management..."
+
+    source "./common.sh"
+
+    # Test temporary directory creation
+    local temp_dir
+    temp_dir=$(create_managed_temp_dir "test")
+    if [ -d "$temp_dir" ]; then
+        test_pass "create_temp_dir"
+    else
+        test_fail "create_temp_dir" "Failed to create temp directory"
+    fi
+
+    # Test cleanup registration
+    register_cleanup_resource "$temp_dir"
+    if [ ${#CLEANUP_RESOURCES[@]} -gt 0 ]; then
+        test_pass "register_cleanup_resource"
+    else
+        test_fail "register_cleanup_resource" "Resource not registered for cleanup"
+    fi
+}
+
+# --- Integration Tests ---
+
+test_mechanism_integration() {
+    echo "Testing Mechanism Integration..."
+
+    # Test if mechanisms are executable
+    local mechanisms=("socratic" "exploration" "consensus" "critique" "synthesis" "peer-review" "puzzle")
+
+    for mechanism in "${mechanisms[@]}"; do
+        if [ -x "./$mechanism" ]; then
+            test_pass "mechanism_executable_$mechanism"
+        else
+            test_fail "mechanism_executable_$mechanism" "Mechanism not executable"
+        fi
+    done
+}
+
+test_classifier_integration() {
+    echo "Testing Classifier Integration..."
+
+    if [ -x "./classifier.sh" ]; then
+        test_pass "classifier_executable"
+
+        # Test basic classification (if possible without models)
+        local test_result
+        test_result=$(source "./classifier.sh" && analyze_intent_patterns "What are the different approaches to solving this problem?" 2>/dev/null)
+        if [ -n "$test_result" ]; then
+            test_pass "classifier_basic_functionality"
+        else
+            test_skip "classifier_basic_functionality" "Cannot test without models"
+        fi
+    else
+        test_fail "classifier_executable" "Classifier script not executable"
+    fi
+}
+
+# --- Performance Tests ---
+
+test_performance_metrics() {
+    echo "Testing Performance Metrics..."
+
+    source "./logging.sh"
+
+    # Test metrics functions exist
+    if command -v log_session_start >/dev/null 2>&1; then
+        test_pass "performance_functions_available"
+    else
+        test_fail "performance_functions_available" "Performance logging functions not available"
+    fi
+
+    # Test metrics file creation
+    if [ -f "$METRICS_FILE" ] || touch "$METRICS_FILE" 2>/dev/null; then
+        test_pass "metrics_file_accessible"
+    else
+        test_fail "metrics_file_accessible" "Cannot access metrics file"
+    fi
+}
+
+# --- Main Test Runner ---
+
+run_all_tests() {
+    init_test_framework
+
+    echo "Running Test Suite..."
+    echo "====================="
+    echo
+
+    # Unit Tests
+    test_common_functions
+    echo
+
+    test_config_loading
+    echo
+
+    test_quality_guard
+    echo
+
+    test_logging_system
+    echo
+
+    test_resource_management
+    echo
+
+    # Integration Tests
+    test_mechanism_integration
+    echo
+
+    test_classifier_integration
+    echo
+
+    # Performance Tests
+    test_performance_metrics
+    echo
+
+    # Test Summary
+    echo "Test Summary"
+    echo "============"
+    echo "โœ… Passed: $TESTS_PASSED"
+    echo "โŒ Failed: $TESTS_FAILED"
+    echo "โญ๏ธ  Skipped: $TESTS_SKIPPED"
+    echo
+
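+    # The pass rate counts only passed and failed tests; skips are excluded,
+    # and integer division truncates the percentage.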
+    local total_tests=$((TESTS_PASSED + TESTS_FAILED))
+    if [ $total_tests -gt 0 ]; then
+        local pass_rate=$((TESTS_PASSED * 100 / total_tests))
+        echo "Pass Rate: $pass_rate%"
+
+        if [ $TESTS_FAILED -eq 0 ]; then
+            echo "๐ŸŽ‰ All tests completed successfully!"
+            return 0
+        else
+            echo "โš ๏ธ  Some tests failed. Please review the results above."
+            return 1
+        fi
+    else
+        echo "No tests were run."
+        return 1
+    fi
+}
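+
+# Usage sketch (illustrative): the script's exit status is the return value of
+# run_all_tests, so a CI step can gate on it, e.g.:
+#
+#   ./test_framework.sh --all || exit 1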
+
+# --- CLI Interface ---
+
+show_help() {
+    echo "AI Thinking Mechanisms Test Framework"
+    echo "Usage: $0 [OPTIONS]"
+    echo
+    echo "Options:"
+    echo "  -a, --all         Run all tests (default)"
+    echo "  -u, --unit        Run only unit tests"
+    echo "  -i, --integration Run only integration tests"
+    echo "  -p, --performance Run only performance tests"
+    echo "  -v, --verbose     Enable verbose output"
+    echo "  -h, --help        Show this help message"
+    echo
+    echo "Examples:"
+    echo "  $0 -a              # Run all tests"
+    echo "  $0 -u -v          # Run unit tests with verbose output"
+    echo "  $0 -p             # Run only performance tests"
+}
+
+# Parse command line arguments
+VERBOSE=false
+TEST_TYPE="all"
+
+while [[ $# -gt 0 ]]; do
+    case $1 in
+        -a|--all)
+            TEST_TYPE="all"
+            shift
+            ;;
+        -u|--unit)
+            TEST_TYPE="unit"
+            shift
+            ;;
+        -i|--integration)
+            TEST_TYPE="integration"
+            shift
+            ;;
+        -p|--performance)
+            TEST_TYPE="performance"
+            shift
+            ;;
+        -v|--verbose)
+            VERBOSE=true
+            shift
+            ;;
+        -h|--help)
+            show_help
+            exit 0
+            ;;
+        *)
+            echo "Unknown option: $1"
+            show_help
+            exit 1
+            ;;
+    esac
+done
+
+# Set verbose output
+if [ "$VERBOSE" = true ]; then
+    set -x
+fi
+
+# Run tests based on type
+case $TEST_TYPE in
+    "all")
+        run_all_tests
+        ;;
+    "unit")
+        init_test_framework
+        test_common_functions
+        echo
+        test_config_loading
+        echo
+        test_quality_guard
+        echo
+        test_logging_system
+        echo
+        test_resource_management
+        ;;
+    "integration")
+        init_test_framework
+        test_mechanism_integration
+        echo
+        test_classifier_integration
+        ;;
+    "performance")
+        init_test_framework
+        test_performance_metrics
+        ;;
+    *)
+        echo "Invalid test type: $TEST_TYPE"
+        show_help
+        exit 1
+        ;;
+esac