ai: Allow reasoning/non on audio/image generation.
* Keep using non-reasoning mode if the response fails.
- src/client/chat_handler.py +14 -2
- src/client/responses/audio.py +13 -7
- src/client/responses/image.py +13 -7
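
The non-reasoning fallback mentioned in the commit body is not visible in the hunks below. A minimal sketch of the idea, assuming jarvis is an awaitable backend that accepts a mode keyword plus the sampling parameters; the helper name, the broad exception handling, and the non_reasoning_mode value are assumptions, not code from this commit:

# Minimal sketch of "keep using non-reasoning mode if the response fails".
# Assumptions: jarvis is an awaitable backend accepting a mode keyword and the
# sampling parameters; this helper and the fallback mode value are hypothetical.
async def generate_with_fallback(jarvis, mode, non_reasoning_mode, **sampling):
    try:
        # First attempt with the caller-selected (possibly reasoning) mode.
        return await jarvis(mode=mode, **sampling)
    except Exception:
        # If that response fails, retry once in non-reasoning mode.
        return await jarvis(mode=non_reasoning_mode, **sampling)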
src/client/chat_handler.py CHANGED
@@ -77,7 +77,13 @@ async def respond(
             new_history,  # Conversation history
             session_id,  # Session ID
             selected_model,  # Selected model
-            jarvis  # AI backend function
+            jarvis,  # AI backend function
+            mode,  # Mode for AI response
+            temperature,  # temperature parameter
+            top_k,  # top_k parameter
+            min_p,  # min_p parameter
+            top_p,  # top_p parameter
+            repetition_penalty  # repetition_penalty parameter
         ):
             yield audio_response  # Yield audio response
             return  # Exit function after handling audio
@@ -89,7 +95,13 @@ async def respond(
             new_history,  # Conversation history
             session_id,  # Session ID
             selected_model,  # Selected model
-            jarvis  # AI backend function
+            jarvis,  # AI backend function
+            mode,  # Mode for AI response
+            temperature,  # temperature parameter
+            top_k,  # top_k parameter
+            min_p,  # min_p parameter
+            top_p,  # top_p parameter
+            repetition_penalty  # repetition_penalty parameter
         ):
             yield image_response  # Yield image response
             return  # Exit function after handling image
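
Both hunks above add the same seven arguments to the audio and image call sites inside respond(). A sketch of bundling them once, assuming only the parameter names shown in the diff; the helper itself is hypothetical and not part of this commit:

# Hypothetical helper: collect the values respond() now forwards to both
# audio_integration and image_integration, so each call site can unpack them.
def build_generation_kwargs(jarvis, mode, temperature, top_k, min_p, top_p, repetition_penalty):
    return {
        "jarvis": jarvis,                          # AI backend function
        "mode": mode,                              # Mode for AI response
        "temperature": temperature,                # temperature parameter
        "top_k": top_k,                            # top_k parameter
        "min_p": min_p,                            # min_p parameter
        "top_p": top_p,                            # top_p parameter
        "repetition_penalty": repetition_penalty,  # repetition_penalty parameter
    }

A call site could then read audio_integration(input, new_history, session_id, selected_model, **build_generation_kwargs(...)), assuming the leading positional arguments match the handler's call order; the keyword names do match the updated signatures in the next two files.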
src/client/responses/audio.py CHANGED
@@ -12,7 +12,13 @@ async def audio_integration(
     new_history,  # Conversation history in message format
     session_id,  # Session ID for conversation context
     selected_model,  # Selected AI model for generation
-    jarvis  # AI backend function for generating responses
+    jarvis,  # AI backend function for generating responses
+    mode,  # Mode for AI response generation
+    temperature,  # Temperature parameter for AI
+    top_k,  # Top-k parameter for AI
+    min_p,  # Min-p parameter for AI
+    top_p,  # Top-p parameter for AI
+    repetition_penalty  # Repetition penalty for AI
 ):
     # Extract the audio instruction text after the '/audio' command prefix and strip whitespace
     audio_instruction = input[6:].strip()  # Get instruction after /audio
@@ -58,12 +64,12 @@ async def audio_integration(
         model=selected_model,  # Selected model
         history=audio_generation_result,  # Updated history with audio result
         user_message=input,  # User input
-        mode=
-        temperature=
-        top_k=
-        min_p=
-        top_p=
-        repetition_penalty=
+        mode=mode,  # Mode for AI response
+        temperature=temperature,  # temperature parameter
+        top_k=top_k,  # top_k parameter
+        min_p=min_p,  # min_p parameter
+        top_p=top_p,  # top_p parameter
+        repetition_penalty=repetition_penalty  # repetition_penalty parameter
     ):
         yield [{"role": "tool", "content": audio_description}]  # Yield audio description in tool role
         return  # Exit after handling audio
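
Both integrations strip the slash command with input[6:]; '/audio' and '/image' are each six characters long, so the slice removes exactly the prefix. A standalone illustration (the example string is made up):

# '/audio' is 6 characters, so input[6:] removes exactly the command prefix.
user_input = "/audio a short lo-fi jingle"
audio_instruction = user_input[6:].strip()
print(audio_instruction)  # -> "a short lo-fi jingle"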
src/client/responses/image.py CHANGED
@@ -12,7 +12,13 @@ async def image_integration(
     new_history,  # Conversation history in message format
     session_id,  # Session ID for conversation context
     selected_model,  # Selected AI model for generation
-    jarvis  # AI backend function for generating responses
+    jarvis,  # AI backend function for generating responses
+    mode,  # Mode for AI response generation
+    temperature,  # Temperature parameter for AI
+    top_k,  # Top-k parameter for AI
+    min_p,  # Min-p parameter for AI
+    top_p,  # Top-p parameter for AI
+    repetition_penalty  # Repetition penalty for AI
 ):
     # Extract the image generation instruction after the '/image' command prefix and strip whitespace
     generate_image_instruction = input[6:].strip()  # Get instruction after /image
@@ -58,12 +64,12 @@ async def image_integration(
         model=selected_model,  # Selected model
         history=image_generation_result,  # Updated history with image result
         user_message=input,  # User input
-        mode=
-        temperature=
-        top_k=
-        min_p=
-        top_p=
-        repetition_penalty=
+        mode=mode,  # Mode for AI response
+        temperature=temperature,  # temperature parameter
+        top_k=top_k,  # top_k parameter
+        min_p=min_p,  # min_p parameter
+        top_p=top_p,  # top_p parameter
+        repetition_penalty=repetition_penalty  # repetition_penalty parameter
     ):
         yield [{"role": "tool", "content": image_description}]  # Yield image description in tool role
         return  # Exit after handling image
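
The bottom hunks of audio.py and image.py show only the keyword arguments of the backend call. A hedged reconstruction of that call's shape after the change; the async for wrapper, the jarvis callee, and the describe_image function (added only to make the sketch self-contained) are assumptions inferred from the context lines, not code confirmed by this diff:

# Hedged reconstruction (assumed shape, hypothetical wrapper): the sampling
# values received by image_integration are forwarded verbatim to the backend.
async def describe_image(jarvis, selected_model, image_generation_result, input,
                         mode, temperature, top_k, min_p, top_p, repetition_penalty):
    async for image_description in jarvis(
        model=selected_model,                   # Selected model
        history=image_generation_result,        # Updated history with image result
        user_message=input,                     # User input
        mode=mode,                              # Mode for AI response
        temperature=temperature,                # temperature parameter
        top_k=top_k,                            # top_k parameter
        min_p=min_p,                            # min_p parameter
        top_p=top_p,                            # top_p parameter
        repetition_penalty=repetition_penalty,  # repetition_penalty parameter
    ):
        yield [{"role": "tool", "content": image_description}]  # Tool-role message
        return  # Stop after the first description

With this change, the reasoning/non-reasoning choice and the sampling settings are decided once by the caller of respond() and flow unchanged through both integrations into the backend call.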