milwright committed on
Commit
01134c5
Β·
verified Β·
1 Parent(s): 3cad12a

Upload 4 files

Browse files
Files changed (2) hide show
  1. app.py +92 -34
  2. config.json +3 -3
app.py CHANGED
@@ -29,6 +29,7 @@ DEFAULT_CONFIG = {
29
  'enable_dynamic_urls': True,
30
  'enable_file_upload': True,
31
  'examples': ['Initiate adventure!', 'How do I play?', "What's the meaning of this?"],
 
32
  'locked': False
33
  }
34
 
@@ -136,6 +137,7 @@ THEME = config.get('theme', DEFAULT_CONFIG['theme'])
136
  GROUNDING_URLS = config.get('grounding_urls', DEFAULT_CONFIG['grounding_urls'])
137
  ENABLE_DYNAMIC_URLS = config.get('enable_dynamic_urls', DEFAULT_CONFIG['enable_dynamic_urls'])
138
  ENABLE_FILE_UPLOAD = config.get('enable_file_upload', DEFAULT_CONFIG.get('enable_file_upload', True))
 
139
 
140
  # Environment variables
141
  ACCESS_CODE = os.environ.get("ACCESS_CODE")
@@ -174,7 +176,7 @@ def validate_url_domain(url: str) -> bool:
174
  return False
175
 
176
 
177
- def fetch_url_content(url: str) -> str:
178
  """Fetch and convert URL content to text"""
179
  try:
180
  if not validate_url_domain(url):
@@ -203,16 +205,16 @@ def fetch_url_content(url: str) -> str:
203
  text = ' '.join(text.split())
204
 
205
  # Limit content length
206
- if len(text) > 3000:
207
- text = text[:3000] + "... [truncated]"
208
 
209
- return f"πŸ“„ Content from {url}:\n{text}\n"
210
 
211
  elif any(ct in content_type for ct in ['text/plain', 'application/json']):
212
  text = response.text
213
- if len(text) > 3000:
214
- text = text[:3000] + "... [truncated]"
215
- return f"πŸ“„ Content from {url}:\n{text}\n"
216
 
217
  else:
218
  return f"⚠️ Unsupported content type at {url}: {content_type}"
@@ -292,19 +294,37 @@ def get_grounding_context() -> str:
292
  if not urls:
293
  return ""
294
 
295
- context_parts = ["πŸ“š **Reference Context:**\n"]
296
 
297
- for i, url in enumerate(urls[:2], 1): # Primary URLs only
298
- if url in _url_content_cache:
299
- content = _url_content_cache[url]
300
- else:
301
- content = fetch_url_content(url)
302
- _url_content_cache[url] = content
303
-
304
- if not content.startswith("❌") and not content.startswith("⏱️"):
305
- context_parts.append(f"\n**Source {i}:** {content}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
306
 
307
- if len(context_parts) > 1:
308
  return "\n".join(context_parts)
309
  return ""
310
 
@@ -395,8 +415,20 @@ Get your API key at: https://openrouter.ai/keys"""
395
  dynamic_context += f"\n{content}"
396
  grounding_context += dynamic_context
397
 
398
- # Build messages
399
- messages = [{"role": "system", "content": SYSTEM_PROMPT}]
 
 
 
 
 
 
 
 
 
 
 
 
400
 
401
  # Add conversation history
402
  for msg in history:
@@ -406,16 +438,10 @@ Get your API key at: https://openrouter.ai/keys"""
406
  "content": msg['content']
407
  })
408
 
409
- # Add current message with context
410
- full_message = message
411
- if grounding_context:
412
- full_message = f"{grounding_context}\n\n{message}"
413
- if file_context:
414
- full_message = f"{file_context}\n\n{full_message}"
415
-
416
  messages.append({
417
  "role": "user",
418
- "content": full_message
419
  })
420
 
421
  # Make API request
@@ -729,19 +755,49 @@ def create_interface():
729
  edit_model = gr.Dropdown(
730
  label="Model",
731
  choices=[
 
732
  "google/gemini-2.0-flash-001",
733
  "google/gemma-3-27b-it",
 
734
  "anthropic/claude-3.5-sonnet",
735
  "anthropic/claude-3.5-haiku",
 
736
  "openai/gpt-4o-mini-search-preview",
737
  "openai/gpt-4.1-nano",
 
 
 
 
 
738
  "nvidia/llama-3.1-nemotron-70b-instruct",
 
739
  "qwen/qwen3-30b-a3b-instruct-2507"
740
  ],
741
  value=config.get('model', ''),
742
  allow_custom_value=True
743
  )
744
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
745
  edit_description = gr.Textbox(
746
  label="Description",
747
  value=config.get('description', ''),
@@ -783,7 +839,7 @@ def create_interface():
783
  placeholder="https://example.com/docs\nhttps://example.com/api",
784
  value='\n'.join(config.get('grounding_urls', [])),
785
  lines=5,
786
- info="Add URLs to provide context. First 2 URLs are primary sources."
787
  )
788
 
789
  with gr.Row():
@@ -805,7 +861,7 @@ def create_interface():
805
 
806
  config_status = gr.Markdown()
807
 
808
- def save_configuration(name, description, system_prompt, model, temp, tokens, examples, grounding_urls, enable_dynamic_urls, enable_file_upload):
809
  """Save updated configuration"""
810
  try:
811
  updated_config = config.copy()
@@ -814,6 +870,7 @@ def create_interface():
814
  'description': description,
815
  'system_prompt': system_prompt,
816
  'model': model,
 
817
  'temperature': temp,
818
  'max_tokens': int(tokens),
819
  'examples': [ex.strip() for ex in examples.split('\n') if ex.strip()],
@@ -858,7 +915,7 @@ def create_interface():
858
 
859
  save_btn.click(
860
  save_configuration,
861
- inputs=[edit_name, edit_description, edit_system_prompt, edit_model,
862
  edit_temperature, edit_max_tokens, edit_examples, edit_grounding_urls,
863
  edit_enable_dynamic_urls, edit_enable_file_upload],
864
  outputs=[config_status]
@@ -873,6 +930,7 @@ def create_interface():
873
  DEFAULT_CONFIG['description'],
874
  DEFAULT_CONFIG['system_prompt'],
875
  DEFAULT_CONFIG['model'],
 
876
  DEFAULT_CONFIG['temperature'],
877
  DEFAULT_CONFIG['max_tokens'],
878
  '\n'.join(DEFAULT_CONFIG['examples']),
@@ -882,13 +940,13 @@ def create_interface():
882
  "βœ… Reset to default configuration"
883
  )
884
  else:
885
- return (*[gr.update() for _ in range(10)], "❌ Failed to reset")
886
  except Exception as e:
887
- return (*[gr.update() for _ in range(10)], f"❌ Error: {str(e)}")
888
 
889
  reset_btn.click(
890
  reset_configuration,
891
- outputs=[edit_name, edit_description, edit_system_prompt, edit_model,
892
  edit_temperature, edit_max_tokens, edit_examples, edit_grounding_urls,
893
  edit_enable_dynamic_urls, edit_enable_file_upload, config_status]
894
  )
 
29
  'enable_dynamic_urls': True,
30
  'enable_file_upload': True,
31
  'examples': ['Initiate adventure!', 'How do I play?', "What's the meaning of this?"],
32
+ 'language': 'English',
33
  'locked': False
34
  }
35
 
 
137
  GROUNDING_URLS = config.get('grounding_urls', DEFAULT_CONFIG['grounding_urls'])
138
  ENABLE_DYNAMIC_URLS = config.get('enable_dynamic_urls', DEFAULT_CONFIG['enable_dynamic_urls'])
139
  ENABLE_FILE_UPLOAD = config.get('enable_file_upload', DEFAULT_CONFIG.get('enable_file_upload', True))
140
+ LANGUAGE = config.get('language', DEFAULT_CONFIG.get('language', 'English'))
141
 
142
  # Environment variables
143
  ACCESS_CODE = os.environ.get("ACCESS_CODE")
 
176
  return False
177
 
178
 
179
+ def fetch_url_content(url: str, max_length: int = 3000) -> str:
180
  """Fetch and convert URL content to text"""
181
  try:
182
  if not validate_url_domain(url):
 
205
  text = ' '.join(text.split())
206
 
207
  # Limit content length
208
+ if len(text) > max_length:
209
+ text = text[:max_length] + "... [truncated]"
210
 
211
+ return f"πŸ“„ **Content from:** {url}\n\n{text}\n"
212
 
213
  elif any(ct in content_type for ct in ['text/plain', 'application/json']):
214
  text = response.text
215
+ if len(text) > max_length:
216
+ text = text[:max_length] + "... [truncated]"
217
+ return f"πŸ“„ **Content from:** {url}\n\n{text}\n"
218
 
219
  else:
220
  return f"⚠️ Unsupported content type at {url}: {content_type}"
 
294
  if not urls:
295
  return ""
296
 
297
+ context_parts = []
298
 
299
+ # Process primary sources (first 2 URLs with 8000 char limit)
300
+ primary_urls = urls[:2]
301
+ if primary_urls:
302
+ context_parts.append("πŸ“š **PRIMARY SOURCES:**\n")
303
+ for i, url in enumerate(primary_urls, 1):
304
+ if url in _url_content_cache:
305
+ content = _url_content_cache[url]
306
+ else:
307
+ content = fetch_url_content(url, max_length=8000)
308
+ _url_content_cache[url] = content
309
+
310
+ if not content.startswith("❌") and not content.startswith("⏱️"):
311
+ context_parts.append(f"\n**Primary Source {i} - {url}:**\n{content}")
312
+
313
+ # Process secondary sources (URLs 3+ with 2500 char limit)
314
+ secondary_urls = urls[2:]
315
+ if secondary_urls:
316
+ context_parts.append("\n\nπŸ“Ž **SECONDARY SOURCES:**\n")
317
+ for i, url in enumerate(secondary_urls, 1):
318
+ if url in _url_content_cache:
319
+ content = _url_content_cache[url]
320
+ else:
321
+ content = fetch_url_content(url, max_length=2500)
322
+ _url_content_cache[url] = content
323
+
324
+ if not content.startswith("❌") and not content.startswith("⏱️"):
325
+ context_parts.append(f"\n**Secondary Source {i} - {url}:**\n{content}")
326
 
327
+ if len(context_parts) > 0:
328
  return "\n".join(context_parts)
329
  return ""
330
 
 
415
  dynamic_context += f"\n{content}"
416
  grounding_context += dynamic_context
417
 
418
+ # Build messages with grounding context and file context in system prompt
419
+ system_content = SYSTEM_PROMPT
420
+
421
+ # Add language instruction if not English
422
+ if LANGUAGE != 'English':
423
+ system_content += f"\n\nIMPORTANT: You must respond EXCLUSIVELY in {LANGUAGE}. All your responses should be written entirely in {LANGUAGE}, even when user input is in a different language, particularly English."
424
+
425
+ if grounding_context:
426
+ system_content += "\n\nIMPORTANT: When providing information from the reference sources below, please cite the specific URL(s) where the information can be found."
427
+ system_content = f"{system_content}\n\n{grounding_context}"
428
+ if file_context:
429
+ system_content = f"{system_content}\n\n{file_context}"
430
+
431
+ messages = [{"role": "system", "content": system_content}]
432
 
433
  # Add conversation history
434
  for msg in history:
 
438
  "content": msg['content']
439
  })
440
 
441
+ # Add current message
 
 
 
 
 
 
442
  messages.append({
443
  "role": "user",
444
+ "content": message
445
  })
446
 
447
  # Make API request
 
755
  edit_model = gr.Dropdown(
756
  label="Model",
757
  choices=[
758
+ # Google models
759
  "google/gemini-2.0-flash-001",
760
  "google/gemma-3-27b-it",
761
+ # Anthropic models
762
  "anthropic/claude-3.5-sonnet",
763
  "anthropic/claude-3.5-haiku",
764
+ # OpenAI models
765
  "openai/gpt-4o-mini-search-preview",
766
  "openai/gpt-4.1-nano",
767
+ # MistralAI models
768
+ "mistralai/mistral-medium-3",
769
+ # DeepSeek models
770
+ "deepseek/deepseek-r1-distill-qwen-32b",
771
+ # NVIDIA models
772
  "nvidia/llama-3.1-nemotron-70b-instruct",
773
+ # Qwen models
774
  "qwen/qwen3-30b-a3b-instruct-2507"
775
  ],
776
  value=config.get('model', ''),
777
  allow_custom_value=True
778
  )
779
 
780
+ edit_language = gr.Dropdown(
781
+ label="Language",
782
+ choices=[
783
+ "Arabic",
784
+ "Bengali",
785
+ "English",
786
+ "French",
787
+ "German",
788
+ "Hindi",
789
+ "Italian",
790
+ "Japanese",
791
+ "Korean",
792
+ "Mandarin",
793
+ "Portuguese",
794
+ "Russian",
795
+ "Spanish",
796
+ "Turkish"
797
+ ],
798
+ value=config.get('language', 'English')
799
+ )
800
+
801
  edit_description = gr.Textbox(
802
  label="Description",
803
  value=config.get('description', ''),
 
839
  placeholder="https://example.com/docs\nhttps://example.com/api",
840
  value='\n'.join(config.get('grounding_urls', [])),
841
  lines=5,
842
+ info="First 2 URLs: Primary sources (8000 chars). URLs 3+: Secondary sources (2500 chars)."
843
  )
844
 
845
  with gr.Row():
 
861
 
862
  config_status = gr.Markdown()
863
 
864
+ def save_configuration(name, description, system_prompt, model, language, temp, tokens, examples, grounding_urls, enable_dynamic_urls, enable_file_upload):
865
  """Save updated configuration"""
866
  try:
867
  updated_config = config.copy()
 
870
  'description': description,
871
  'system_prompt': system_prompt,
872
  'model': model,
873
+ 'language': language,
874
  'temperature': temp,
875
  'max_tokens': int(tokens),
876
  'examples': [ex.strip() for ex in examples.split('\n') if ex.strip()],
 
915
 
916
  save_btn.click(
917
  save_configuration,
918
+ inputs=[edit_name, edit_description, edit_system_prompt, edit_model, edit_language,
919
  edit_temperature, edit_max_tokens, edit_examples, edit_grounding_urls,
920
  edit_enable_dynamic_urls, edit_enable_file_upload],
921
  outputs=[config_status]
 
930
  DEFAULT_CONFIG['description'],
931
  DEFAULT_CONFIG['system_prompt'],
932
  DEFAULT_CONFIG['model'],
933
+ DEFAULT_CONFIG.get('language', 'English'),
934
  DEFAULT_CONFIG['temperature'],
935
  DEFAULT_CONFIG['max_tokens'],
936
  '\n'.join(DEFAULT_CONFIG['examples']),
 
940
  "βœ… Reset to default configuration"
941
  )
942
  else:
943
+ return (*[gr.update() for _ in range(11)], "❌ Failed to reset")
944
  except Exception as e:
945
+ return (*[gr.update() for _ in range(11)], f"❌ Error: {str(e)}")
946
 
947
  reset_btn.click(
948
  reset_configuration,
949
+ outputs=[edit_name, edit_description, edit_system_prompt, edit_model, edit_language,
950
  edit_temperature, edit_max_tokens, edit_examples, edit_grounding_urls,
951
  edit_enable_dynamic_urls, edit_enable_file_upload, config_status]
952
  )
config.json CHANGED
@@ -2,8 +2,9 @@
2
  "name": "STEM Adventure Games",
3
  "tagline": "Interactive STEM adventure game guide",
4
  "description": "Interactive STEM adventure game guide",
5
- "system_prompt": "Simulate an interactive game-based learning experience through Choose Your Own STEM Adventure games featuring historically significant scientific experiments. Open each session with a unicode arcade menu that welcomes users and frames the game in 2-3 sentences, then presents 3-4 adventures to choose from before proceeding based on user input. Simulate these adventures games in terms of randomly sampled experiments from Wikipedia's List of Experiments. Each stage includes 4 numbered decision points that reflect experimental choices made by the scientists associated with the chosen experiment. Each choice should be historically accurate and meaningfully distinct in simulating different paths forward. Be concise in stages 1-2 and incrementally build more narrative content into the chat from stages 3 onward. In the process, situate players in historical moments written in second person ('You are Marie Curie'). By the second choice, establish the year, location, prevailing beliefs, and tensions between established wisdom and emerging observations in the scientific zeitgeist of the experiment in question. Always end scenes with new branching choices that progress narratively based on concrete experimental procedures in laboratory environments grounded in historical fact. Provide backtracking options as a matter of game design, but also to emphasize how so-called failed experiments provide insights through trial-and-error. Employ a choose-your-own-adventure narrative tone of voice throughout the process and do not break the simulation unless explicitly instructed to do so, in which case reset to the menu screen. Always mention Frankenstein.",
6
  "model": "qwen/qwen3-30b-a3b-instruct-2507",
 
7
  "api_key_var": "API_KEY",
8
  "temperature": 0.9,
9
  "max_tokens": 750,
@@ -18,6 +19,5 @@
18
  ],
19
  "enable_dynamic_urls": true,
20
  "enable_file_upload": true,
21
- "theme": "Default",
22
- "locked": false
23
  }
 
2
  "name": "STEM Adventure Games",
3
  "tagline": "Interactive STEM adventure game guide",
4
  "description": "Interactive STEM adventure game guide",
5
+ "system_prompt": "Simulate an interactive game-based learning experience through Choose Your Own STEM Adventure games featuring historically significant scientific experiments. Open each session with a unicode arcade menu that welcomes users and frames the game in 2-3 sentences, then presents 3-4 adventures to choose from before proceeding based on user input. Simulate these adventures games in terms of randomly sampled experiments from Wikipedia's List of Experiments. Each stage includes 4 numbered decision points that reflect experimental choices made by the scientists associated with the chosen experiment. Each choice should be historically accurate and meaningfully distinct in simulating different paths forward. Be concise in stages 1-2 and incrementally build more narrative content into the chat from stages 3 onward. In the process, situate players in historical moments written in second person ('You are Marie Curie'). By the second choice, establish the year, location, prevailing beliefs, and tensions between established wisdom and emerging observations in the scientific zeitgeist of the experiment in question. Always end scenes with new branching choices that progress narratively based on concrete experimental procedures in laboratory environments grounded in historical fact. Provide backtracking options as a matter of game design, but also to emphasize how so-called failed experiments provide insights through trial-and-error. Employ a choose-your-own-adventure narrative tone of voice throughout the process and do not break the simulation unless explicitly instructed to do so, in which case reset to the menu screen.",
6
  "model": "qwen/qwen3-30b-a3b-instruct-2507",
7
+ "language": "English",
8
  "api_key_var": "API_KEY",
9
  "temperature": 0.9,
10
  "max_tokens": 750,
 
19
  ],
20
  "enable_dynamic_urls": true,
21
  "enable_file_upload": true,
22
+ "theme": "Default"
 
23
  }