import argparse
import time
from PIL import Image
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaTokenizer
from transformers import StoppingCriteria, StoppingCriteriaList
import dataclasses
from enum import auto, Enum
from typing import List, Tuple, Any
class SeparatorStyle(Enum):
    """Different separator style."""
    SINGLE = auto()
    TWO = auto()
    THREE = auto()
@dataclasses.dataclass
class Conversation:
    """A class that keeps all conversation history."""
    system: str
    roles: List[str]
    messages: List[List[str]]
    offset: int
    # system_img: List[Image.Image] = []
    sep_style: SeparatorStyle = SeparatorStyle.SINGLE
    sep: str = "###"
    sep2: str = None
    skip_next: bool = False
    conv_id: Any = None

    def get_prompt(self):
        if self.sep_style == SeparatorStyle.SINGLE:
            ret = self.system + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ": " + message + self.sep
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.TWO:
            seps = [self.sep, self.sep2]
            ret = self.system + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ": " + message + seps[i % 2]
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.THREE:
            ret = self.system
            for i, (role, message) in enumerate(self.messages):
                if message:
                    if type(message) == list:
                        message = message[0]
                    ret += role + ": " + message
                else:
                    ret += role + ":"
            return ret
        else:
            raise ValueError(f"Invalid style: {self.sep_style}")

    def append_message(self, role, message):
        self.messages.append([role, message])

    def to_gradio_chatbot(self):
        print('to_gradio_chatbot')
        print(self.messages)
        ret = []
        for i, (role, msg) in enumerate(self.messages[self.offset:]):
            if i % 2 == 0:
                ret.append([msg, None])
            else:
                ret[-1][-1] = msg
        return ret

    def copy(self):
        return Conversation(
            system=self.system,
            # system_img=self.system_img,
            roles=self.roles,
            messages=[[x, y] for x, y in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2,
            conv_id=self.conv_id)

    def dict(self):
        return {
            "system": self.system,
            # "system_img": self.system_img,
            "roles": self.roles,
            "messages": self.messages,
            "offset": self.offset,
            "sep": self.sep,
            "sep2": self.sep2,
            "conv_id": self.conv_id,
        }
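# Usage sketch (not part of the original file): with SeparatorStyle.THREE, get_prompt()
# concatenates the system string with "<role>: <message>" pairs and leaves the last role
# open for the model to complete, e.g.
#
#   conv = Conversation(system="...", roles=(" Question", " Answer"), messages=[],
#                       offset=2, sep_style=SeparatorStyle.THREE, sep="###")
#   conv.append_message(conv.roles[0], "What is shown in the image?")
#   conv.append_message(conv.roles[1], None)
#   conv.get_prompt()  # -> "... Question: What is shown in the image? Answer:"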
class StoppingCriteriaSub(StoppingCriteria):
    def __init__(self, stops=[], encounters=1):
        super().__init__()
        # `stops` is a list of 1-D tensors of stop token ids; `encounters` is currently unused.
        self.stops = stops

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        # Stop once the generated sequence ends with any of the stop token sequences.
        for stop in self.stops:
            if torch.all((stop == input_ids[0][-len(stop):])).item():
                return True
        return False
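# Usage sketch (assumption, not in the original file): StoppingCriteriaList is imported
# above but never used here. A typical way to wire StoppingCriteriaSub into a Hugging Face
# generate() call would be the following; the stop token ids are tokenizer-specific
# placeholders, not values taken from this Space.
#
#   stop_words_ids = [torch.tensor([835]).to(device), torch.tensor([2277, 29937]).to(device)]
#   stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)])
#   outputs = hf_model.generate(input_ids, stopping_criteria=stopping_criteria)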
CONV_VISION = Conversation(
    system="A chat between a human who asks questions and you, who gives helpful, detailed, and insightful answers to those questions.",
    roles=(" Question", " Answer"),
    messages=[],
    offset=2,
    sep_style=SeparatorStyle.THREE,
    sep="###",
)
CONV_DIRECT = Conversation(
    system="",
    roles=("", ""),
    messages=[],
    offset=2,
    sep_style=SeparatorStyle.THREE,
    sep="###",
)
class Chat:
    def __init__(self, model, vis_processor, device='cuda:0'):
        self.device = device
        self.model = model
        self.vis_processor = vis_processor

    def ask(self, text, conv):
        # conv.messages = []  # hack: uncomment to drop history between turns
        conv.append_message(conv.roles[0], text)

    def answer(self, conv, img_list, max_new_tokens=300, num_beams=1, min_length=1, top_p=0.9,
               repetition_penalty=1.0, length_penalty=1, temperature=1.0, max_length=2000):
        # Note: only num_beams and temperature are forwarded to generate();
        # the remaining keyword arguments are currently unused.
        conv.append_message(conv.roles[1], None)
        question = conv.get_prompt()
        image = img_list[0]  # torch.stack(img_list).to(self.device)
        output_text = self.model.generate({"image": image, "prompt": question},
                                          num_beams=num_beams, temperature=temperature)
        conv.messages[-1][1] = output_text
        return output_text, ''

    def upload_img(self, image, conv, img_list):
        if isinstance(image, str):  # an image path
            raw_image = Image.open(image).convert('RGB')
            image = self.vis_processor(raw_image).unsqueeze(0).to(self.device)
        elif isinstance(image, Image.Image):
            raw_image = image.convert('RGB')
            image = self.vis_processor(raw_image).unsqueeze(0).to(self.device)
        elif isinstance(image, torch.Tensor):
            if len(image.shape) == 3:
                image = image.unsqueeze(0)
            image = image.to(self.device)
        # image_emb, _ = self.model.encode_img(image)
        img_list.append(image)
        # conv.append_message(conv.roles[0], "")
        msg = "Received."
        # self.conv.append_message(self.conv.roles[1], msg)
        return msg
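# Minimal sketch of how these pieces fit together. The Conversation part below is runnable
# as-is; the Chat part is commented out because it needs a multimodal model exposing
# generate({"image": ..., "prompt": ...}) and a matching vis_processor, both of which are
# defined elsewhere in this Space (the names `model` and `vis_processor` are placeholders).
if __name__ == "__main__":
    # Prompt construction only (no model needed).
    demo_conv = CONV_VISION.copy()
    demo_conv.append_message(demo_conv.roles[0], "Describe the picture.")
    demo_conv.append_message(demo_conv.roles[1], None)
    print(demo_conv.get_prompt())

    # Full pipeline (requires the Space's model and visual processor):
    # chat = Chat(model, vis_processor, device='cuda:0')
    # conv = CONV_VISION.copy()
    # img_list = []
    # chat.upload_img("example.jpg", conv, img_list)
    # chat.ask("Describe the picture.", conv)
    # answer, _ = chat.answer(conv, img_list)
    # print(answer)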