Update app.py
app.py CHANGED
@@ -17,6 +17,8 @@ from PIL import Image
 from cobra import load
 import time
 
+subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'packaging'])
+subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'ninja'])
 subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'mamba-ssm'])
 subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'causal-conv1d'])
 
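Note on the install block: mamba-ssm and causal-conv1d both compile CUDA extensions when pip installs them, and their build setups expect packaging and ninja to already be importable, which is presumably why the two new check_call lines have to come first. Re-running pip on every startup is slow, though; below is a minimal sketch of an idempotent variant, assuming the same install-at-runtime approach (ensure_installed is a hypothetical helper, not part of this Space):

import importlib.util
import subprocess
import sys

def ensure_installed(package, module_name=None):
    # Only shell out to pip when the module is genuinely missing, so a
    # warm restart of the Space skips the slow install step entirely.
    if importlib.util.find_spec(module_name or package) is None:
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])

# Build tools first: the two CUDA packages need packaging/ninja at build time.
for pkg, mod in [('packaging', 'packaging'), ('ninja', 'ninja'),
                 ('mamba-ssm', 'mamba_ssm'), ('causal-conv1d', 'causal_conv1d')]:
    ensure_installed(pkg, mod)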
@@ -31,10 +33,11 @@ else:
 vlm.to(DEVICE, dtype=DTYPE)
 
 prompt_builder = vlm.get_prompt_builder()
-system_prompt = prompt_builder.system_prompt
 
 @spaces.GPU
-def bot_streaming(message, history):
+def bot_streaming(message, history, temperature, top_k, max_new_tokens):
+    if len(history) == 0:
+        prompt_builder.prompt, prompt_builder.turn_count = "", 0
     print(message)
     if message["files"]:
         image = message["files"][-1]["path"]
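Note on the reset: prompt_builder is created once at module scope, so without the new `if len(history) == 0` guard every new chat would inherit the turns of the previous one. The guard clears the builder's internal prompt and turn_count whenever Gradio hands over an empty history. An alternative that avoids the shared mutable state altogether is to replay the history into a fresh builder on each call; a sketch, assuming get_prompt_builder() is cheap to call and that history arrives as plain-text (user, assistant) pairs:

def build_prompt(vlm, message_text, history):
    builder = vlm.get_prompt_builder()      # fresh state on every request
    for user_turn, assistant_turn in history:
        # Replay earlier turns; file-upload turns would need filtering here.
        builder.add_turn(role="human", message=user_turn)
        builder.add_turn(role="gpt", message=assistant_turn)
    builder.add_turn(role="human", message=message_text)
    return builder.get_prompt()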
@@ -47,48 +50,32 @@ def bot_streaming(message, history):
 
     image = Image.open(image).convert("RGB")
 
-    prompt_builder.add_turn(role="human", message=message)
+    prompt_builder.add_turn(role="human", message=message['text'])
     prompt_text = prompt_builder.get_prompt()
-
+
     # Generate from the VLM
-
-
-
-
-
-
-
-
-
-
-    )
+    with torch.no_grad():
+        generated_text = vlm.generate(
+            image,
+            prompt_text,
+            cg=True,
+            do_sample=True,
+            temperature=temperature,
+            top_k=top_k,
+            max_new_tokens=max_new_tokens,
+        )
     prompt_builder.add_turn(role="gpt", message=generated_text)
 
-
-    # generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=100)
-    # generation_kwargs = dict(image, prompt_text, cg=True, do_sample=cfg.do_sample, temperature=cfg.temperature, max_new_tokens=cfg.max_new_tokens)
-    generation_kwargs = dict(image, prompt_text, cg=True, do_sample=True, temperature=1.0, max_new_tokens=2048)
-
-    thread = Thread(target=vlm.generate, kwargs=generation_kwargs)
-    thread.start()
-
-    text_prompt =f"[INST] \n{message['text']} [/INST]"
-    print(generated_text)
-
-
-    buffer = ""
+    time.sleep(0.04)
     yield generated_text
-
-
-
-
-
-
-
-
-
-
-                        {"text": "How to make this pastry?", "files":["./baklava.png"]}],
-                        description="Try [LLaVA Next](https://huggingface.co/papers/2310.03744) in this demo. Upload an image and start chatting about it, or simply try one of the examples below.",
-                        stop_btn="Stop Generation", multimodal=True)
+
+
+demo = gr.ChatInterface(fn=bot_streaming,
+                        additional_inputs=[gr.Slider(0, 1, value=0.2, label="Temperature"),
+                                           gr.Slider(1, 3, value=1, step=1, label="Top k"),
+                                           gr.Slider(1, 2048, value=256, step=1, label="Max New Tokens")],
+                        title="Cobra",
+                        description="Try [Cobra](https://huggingface.co/papers/2403.14520) in this demo. Upload an image and start chatting about it.",
+                        stop_btn="Stop Generation", multimodal=True,
+                        examples=[{"text": "Describe this image", "files":["./cobra.png"]}])
 demo.launch(debug=True)
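The removed generation path was the likely source of this Space's runtime error: dict(image, prompt_text, ...) raises a TypeError because dict() does not accept bare positional values, and print(generated_text) ran before anything had assigned generated_text. The replacement calls vlm.generate synchronously under torch.no_grad() and yields the finished reply once, so time.sleep(0.04) only delays that single yield. If a typing effect is wanted, the finished reply can be fed back to the UI in slices; a sketch (stream_text is hypothetical, and this is cosmetic pacing, not token-level streaming):

import time

def stream_text(generated_text, delay=0.02):
    # Yield progressively longer prefixes so gr.ChatInterface repaints the
    # message as it "types"; the full string already exists at this point.
    buffer = ""
    for chunk in generated_text.split(" "):
        buffer = chunk if not buffer else buffer + " " + chunk
        time.sleep(delay)
        yield buffer

Inside bot_streaming, the last two lines would then become `yield from stream_text(generated_text)`.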
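On the ChatInterface wiring: with multimodal=True the handler's message argument is a dict rather than a string, which is what makes the message["files"][-1]["path"] access above work, and the three sliders in additional_inputs are passed positionally after (message, history), so their order has to match the new bot_streaming signature. At the slider defaults the call looks roughly like this (the exact files structure varies by Gradio version; this shape matches the accesses in the diff):

message = {
    "text": "Describe this image",
    "files": [{"path": "./cobra.png"}],
}
# Slider values arrive positionally after (message, history):
# bot_streaming(message, history, temperature=0.2, top_k=1, max_new_tokens=256)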