author     elioat <elioat@tilde.institute>  2025-01-05 08:06:13 -0500
committer  elioat <elioat@tilde.institute>  2025-01-05 08:06:13 -0500
commit     01eb4d24bfc0cad32bf2ee50d41a28f42254ba9c (patch)
tree       f5784581f4d8a0491c16861bfd4f8a07ed388d32 /html/matt-chat
parent     c68b2f932f510a40f5c4098242be0ed8ab766d0e (diff)
download   tour-01eb4d24bfc0cad32bf2ee50d41a28f42254ba9c.tar.gz
*
Diffstat (limited to 'html/matt-chat')
-rw-r--r--  html/matt-chat/index.html  13
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/html/matt-chat/index.html b/html/matt-chat/index.html
index d802477..3ae641f 100644
--- a/html/matt-chat/index.html
+++ b/html/matt-chat/index.html
@@ -180,12 +180,8 @@
         // though this is relatively naive at the moment
         const config = {
             apiUrl: "http://localhost:11434/v1/chat/completions",
-            models: [
-                { value: "llama3.1:8b", label: "llama3.1:8b, general tasks" },
-                { value: "llama3.2:latest", label: "llama3.2:latest, general stuff" },
-                { value: "qwen2.5-coder:1.5b", label: "qwen2.5-coder:1.5b, fast coding" },
-                { value: "qwen2.5-coder:7b", label: "qwen2.5-coder:7b, fast-ish coding" }
-            ],
+            API_URL: "http://localhost:11434/v1",
+            API_MODELS_ENDPOINT: "http://localhost:11434/v1/models",
             contextWindowSize: 6, // Number of previous exchanges to remember
             systemMessage: "You are a helpful assistant. If you don't know something you'll let me know. Your name is Matt.", // Set the mood and personality for the LLM's responses
             maxTokens: 4096, // Approximate max tokens for most models
@@ -200,8 +196,7 @@
 
         let isCatMode = false; // Flag to track cat mode
 
-        const API_URL = "http://localhost:11434/v1";
-        const API_MODELS_ENDPOINT = `${API_URL}/models`;
+        const API_MODELS_ENDPOINT = config.API_MODELS_ENDPOINT;
 
         // Function to handle errors
         function handleError(message) {
@@ -223,7 +218,7 @@
             const modelIds = [];
 
             try {
-                const response = await fetch(API_MODELS_ENDPOINT);
+                const response = await fetch(config.API_MODELS_ENDPOINT);
                 if (!response.ok) throw new Error('Failed to fetch models');
 
                 const data = await response.json();
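This commit replaces the hard-coded model list in config with a dynamic lookup: the client now asks the server which models it has via config.API_MODELS_ENDPOINT. The diff is cut off above before the response is parsed, but an OpenAI-compatible /v1/models endpoint (which Ollama exposes) returns a list object whose entries each carry an id field, so the remainder plausibly reduces to something like the sketch below. The fetchModelIds helper name is hypothetical, not from the commit:

        // Hypothetical helper (not in this commit): collect model ids from an
        // OpenAI-compatible /v1/models response, which has the shape
        // { "object": "list", "data": [ { "id": "llama3.1:8b", ... }, ... ] }.
        async function fetchModelIds(endpoint) {
            const response = await fetch(endpoint);
            if (!response.ok) throw new Error('Failed to fetch models');
            const data = await response.json();
            // Each entry's id is the model name the server reports,
            // e.g. "qwen2.5-coder:7b", suitable for a <select> dropdown.
            return (data.data ?? []).map(model => model.id);
        }

        // Usage against the commit's config object:
        // const modelIds = await fetchModelIds(config.API_MODELS_ENDPOINT);

The upside of this shape is that the page tracks whatever models are actually pulled into the local Ollama instance, at the cost of losing the human-written labels ("fast coding", "general stuff") the old hard-coded list carried.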