# -*- coding: utf-8 -*-
"""Load Model and Run Gradio - llama.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1IQ2EW-KFfdkxEL8sZSfXA0WcS7ZHVPIf
"""
import os
# Hugging Face access token, supplied through the Space secret named "token".
token = os.environ["token"]
# !pip install gradio --quiet
# !pip install requests --quiet
# !pip install -Uq xformers --index-url https://download.pytorch.org/whl/cu121
import torch
import gradio as gr
# Base model (also provides the tokenizer) and the fine-tuned LoRA adapter on the Hub.
model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
peft_model_adapter_id = "nttwt1597/test_v2"
# model_directory = "./model/"
# device = "cuda" if torch.cuda.is_available() else "cpu"
# print("Using:", device)
# Commented out IPython magic to ensure Python compatibility.
# %%capture
# GPU compute capability; only consumed by the commented-out install commands below.
major_version, minor_version = torch.cuda.get_device_capability()
# Must install separately since Colab has torch 2.2.1, which breaks packages
#!pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"
# if major_version >= 8:
# # Use this for new GPUs like Ampere, Hopper GPUs (RTX 30xx, RTX 40xx, A100, H100, L40)
# !pip install --no-deps packaging ninja einops flash-attn xformers trl peft accelerate bitsandbytes
# else:
# # Use this for older GPUs (V100, Tesla T4, RTX 20xx)
# !pip install --no-deps xformers trl peft accelerate bitsandbytes
# pass
# cuda 12.1 version
from unsloth import FastLanguageModel
from peft import PeftConfig, PeftModel, get_peft_model
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = model_id,      # base model the adapter was trained on
    max_seq_length = 4096,
    dtype = None,               # None = auto-detect (bfloat16 on Ampere+, else float16)
    load_in_4bit = True,
    token = token,              # needed to download the gated Llama-3 weights
)
model.load_adapter(peft_model_adapter_id, token=token)
FastLanguageModel.for_inference(model) # Enable native 2x faster inference
from transformers import TextIteratorStreamer
from threading import Thread
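# `terminators` is used as eos_token_id below but was never defined; a minimal
# definition assuming the standard Llama-3 stop tokens (the regular EOS token plus
# the "<|eot_id|>" end-of-turn token). Adjust if the fine-tune uses different stops.
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>"),
]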
criteria_prompt = """ Please generate the eligibility criteria, which will be used in clinical research, based on the given clinical trials information as a clinical researcher.
### Clinical trial information:
{}
### Eligibility criteria:
{}"""
def format_prompt(text):
return criteria_prompt.format(text, "")
def run_model_on_text(text):
    prompt = format_prompt(text)
    # Tokenize and move the tensors to the model's device (the 4-bit model lives on the GPU).
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # Stream tokens back to Gradio as they are generated.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(
        inputs,
        streamer=streamer,
        max_new_tokens=1024,
        do_sample=True,
        eos_token_id=terminators,
        repetition_penalty=1.2,
    )
    # Run generation in a background thread so partial output can be yielded to the UI.
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()
    generated_text = ""
    for new_text in streamer:
        generated_text += new_text
        yield generated_text
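# Example clinical-trial description shown as the input textbox placeholder.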
place_holder = """Study Objectives
[Brief Summary] Optical diagnosis of colorectal polyps is a promising tool to avoid risks of unnecessary polypectomies and to save costs of tissue pathology. NICE (NBI International Colorectal Endoscopic) and WASP (Workgroup on Serrated Polyps and Polyposis) classification were developed for diagnosis of adenomatous and sessile serrated polyps, respectively.
[Detailed Description] Near-focus (NF) narrow-band imaging (NBI) is an image-magnifying technology which enables optical magnification of up to 65x in near focus (NF) compared with 52x in normal standard focus (SF) with the simple push of a button of the endoscope to be interchangeable between NF and SF. There were few studies comparing diagnostic accuracy between NF and SF in the diagnosis of colorectal polyps. So, our aim of the current study is to compare accuracy of NF NBI compared with SF NBI in the optical diagnosis of neoplastic and non-neoplastic polyp and the accuracy of NF NBI versus SF NBI in distinguishing serrated adenoma from hyperplastic polyp in sessile lesions using histologic evaluation as the gold standard.
Conditions: Colorectal Polyp, Colorectal Neoplasms
Intervention / Treatment:
Diagnostic Test: Near Focus NBI
Diagnostic Test: Standard Focus NBI"""
prefilled_value = """Study Objectives
[Brief Summary] and/or [Detailed Description]:
Conditions:
Intervention / Treatment:"""
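# Input and output widgets for the Gradio interface.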
prompt_box = gr.Textbox(
lines=25,
label="Research Information",
placeholder=place_holder,
value=prefilled_value,
)
output_box = gr.Textbox(
lines=25,
label="Eligiblecriteria Criteria",
)
demo = gr.Interface(
fn=run_model_on_text,
inputs=prompt_box,
outputs=output_box,
allow_flagging='auto',
)
demo.queue(concurrency_limit=100).launch(share=True, debug=True)