Spaces:
Running
Running
import os

from dotenv import load_dotenv
from openai import OpenAI

# Load variables from a local .env file (no-op if the file is absent).
load_dotenv()

# Connection settings, all overridable through the environment.
api_key = os.getenv("OPENAI_API_KEY", "")
url = os.getenv("OPENAI_API_URL", "https://api.openai.com/v1/")
model = os.getenv("OPENAI_API_MODEL", "openai/gpt-oss-120b")

# Shared OpenAI-compatible client used by the helpers below.
client = OpenAI(
    api_key=api_key,
    base_url=url,
)
def run_openai(prompt: str, sys_prompt: str) -> str:
    """Send one chat-completion request and return the model's reply text.

    Args:
        prompt: The user message to send to the model.
        sys_prompt: The system prompt steering the model's behavior.

    Returns:
        The assistant's reply, or a short "LLM infer failed: ..." string
        when the API call raises, so callers get a message instead of a
        crash. Returns "" if the API yields no text content.
    """
    try:
        response = client.chat.completions.create(
            model=model,
            max_tokens=5000,
            temperature=1,
            presence_penalty=0,
            top_p=0.95,
            messages=[
                {"role": "system", "content": sys_prompt},
                {"role": "user", "content": prompt},
            ],
        )
    except Exception as e:
        # Best-effort boundary handler: surface the exception type to the
        # caller rather than propagating. (Typo fixed: "filed" -> "failed".)
        return f"LLM infer failed: {type(e).__name__}"
    # message.content may be None (e.g. a tool-call-only response); the
    # declared return type is str, so normalize None to "".
    content = response.choices[0].message.content
    return content if content is not None else ""