File size: 6,235 Bytes
770451b
e0ca0de
 
668dced
72744fc
 
e0ca0de
 
72744fc
e0ca0de
72744fc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e0ca0de
 
 
 
a004d4a
72744fc
 
 
 
 
 
 
 
 
 
a004d4a
72744fc
 
668dced
72744fc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5b571be
72744fc
 
 
 
 
5b571be
72744fc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a004d4a
72744fc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1fea0a1
72744fc
 
 
a004d4a
72744fc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e0ca0de
72744fc
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
import gradio as gr
import requests
from urllib.parse import parse_qs, urlparse
from transformers import pipeline
import os
import time

# Backend API endpoints (Flask service assumed to run locally on port 5000).
BACKEND_BASE_URL = "http://127.0.0.1:5000"
# POST {"appointment_id": ...} — marks the appointment finished (see end_call).
COMPLETE_APPOINTMENT_ENDPOINT = f"{BACKEND_BASE_URL}/complete_appointment"
# POST {"appointment_id": ...} — checks the id is real (see validate_appointment).
VALIDATE_APPOINTMENT_ENDPOINT = f"{BACKEND_BASE_URL}/validate_appointment"

# URL of the pre-generated avatar video, served by the same backend.
AVATAR_VIDEO_PATH = "http://127.0.0.1:5000/serve_video/ai_doctor_avatar.mp4"  # Replace with your actual video URL
 # <source src="http://127.0.0.1:5000/serve_video/ai_doctor_avatar.mp4" type="video/mp4">

# Load lightweight models for faster performance.
# NOTE(review): both pipelines are built at import time, so model download
# (first run) and loading block startup before the UI is served.
# Free-form chat fallback used when the QA model is not confident enough.
conversation_pipeline = pipeline(
    "text-generation",
    model="microsoft/DialoGPT-medium",
    device="cpu"  # Change to "cuda" if GPU available
)

# Extractive question-answering model tried first for medical questions.
medical_qa_pipeline = pipeline(
    "question-answering",
    model="medicalai/ClinicalBERT_QA",
    device="cpu"
)

def get_appointment_id(request=None):
    """Return the ``appointment_id`` query parameter from the request URL.

    Args:
        request: An object with a ``url`` attribute, normally the
            ``gr.Request`` that Gradio injects into event handlers. If
            omitted, a bare ``gr.Request()`` is constructed for backward
            compatibility — but outside a live HTTP request that object has
            no usable URL, so the function falls back to the sentinel.

    Returns:
        The first ``appointment_id`` value from the query string, or
        ``"Unknown"`` when the parameter is absent or the URL cannot be
        parsed.
    """
    try:
        if request is None:
            # Original behavior: instantiate a request object directly.
            # This only yields a URL inside an actual request context.
            request = gr.Request()
        query_params = parse_qs(urlparse(str(request.url)).query)
        return query_params.get("appointment_id", ["Unknown"])[0]
    except Exception:
        # No request context or malformed URL — degrade to the sentinel
        # instead of crashing app construction.
        return "Unknown"

def validate_appointment(appointment_id):
    """Ask the backend whether *appointment_id* refers to a real appointment.

    Returns True only when the backend answers with ``{"status": "success"}``;
    any network failure, timeout, or unexpected payload yields False.
    """
    payload = {"appointment_id": appointment_id}
    try:
        reply = requests.post(
            VALIDATE_APPOINTMENT_ENDPOINT,
            json=payload,
            timeout=3,
        )
        body = reply.json()
    except Exception:
        # Unreachable backend or non-JSON response — treat as invalid.
        return False
    return body.get("status") == "success"

def generate_response(user_input):
    """Produce the doctor's reply to *user_input*.

    The extractive medical QA model is consulted first; its answer is used
    when the confidence score exceeds 0.25. Otherwise the conversational
    model generates a free-form continuation of a "Patient/Doctor" prompt.
    Any model failure yields a polite retry message.
    """
    try:
        qa = medical_qa_pipeline({
            "question": user_input,
            "context": "Medical consultation between doctor and patient"
        })
        if qa["score"] > 0.25:
            return qa["answer"]

        # QA was not confident enough — fall back to free-form generation.
        prompt = f"Patient: {user_input}\nDoctor:"
        generated = conversation_pipeline(
            prompt,
            max_length=150,
            num_return_sequences=1,
            do_sample=True,
            top_p=0.9,
            temperature=0.7
        )
        full_text = generated[0]["generated_text"]
        # Keep only what follows the final "Doctor:" marker.
        return full_text.split("Doctor:")[-1].strip()

    except Exception:
        return "I'm experiencing high demand. Could you please repeat your question?"

def end_call(appointment_id):
    """Notify the backend that the consultation for *appointment_id* ended.

    Returns a user-facing status message describing the outcome.
    """
    try:
        reply = requests.post(
            COMPLETE_APPOINTMENT_ENDPOINT,
            json={"appointment_id": appointment_id},
            timeout=3,
        )
    except Exception:
        # Timeout, connection refused, DNS failure, etc.
        return "Network error. Please check your connection."
    return (
        "Consultation completed successfully. Thank you!"
        if reply.status_code == 200
        else "Couldn't complete appointment. Please contact support."
    )

# Stylesheet injected into the Gradio app: color variables, the 9:16 avatar
# panel, the flex chat column, button accents, and a page fade-in animation.
custom_css = """
:root {
    --primary: #2d8cf0;
    --secondary: #f8f9fa;
    --accent: #ff6b6b;
}

.gradio-container {
    font-family: 'Inter', sans-serif;
    max-width: 1200px !important;
}

.avatar-container {
    aspect-ratio: 9/16;
    background: black;
    border-radius: 12px;
    overflow: hidden;
}

.video-container video {
    object-fit: cover;
    width: 100%;
    height: 100%;
}

.chat-container {
    height: 100%;
    display: flex;
    flex-direction: column;
}

.chatbot {
    min-height: 500px;
    flex-grow: 1;
    border-radius: 12px;
    background: var(--secondary);
}

.input-row {
    margin-top: 0.5rem !important;
}

.primary-btn {
    background: var(--primary) !important;
}

.end-btn {
    background: var(--accent) !important;
}

/* Animation for smooth loading */
@keyframes fadeIn {
    from { opacity: 0; }
    to { opacity: 1; }
}

.gradio-app {
    animation: fadeIn 0.3s ease-in;
}
"""

with gr.Blocks(css=custom_css, title="AI Doctor Consultation") as demo:
    # Appointment id is resolved once at build time; "Unknown" when the
    # URL carries no appointment_id parameter.
    appointment_id = gr.State(value=get_appointment_id())

    # Header
    gr.Markdown("""
    <div style="text-align: center; margin-bottom: 1rem;">
        <h1 style="margin: 0; color: #2d8cf0;">AI Doctor Consultation</h1>
        <p style="margin: 0; color: #666;">Your health matters to us</p>
    </div>
    """)

    with gr.Row(equal_height=True):
        # Left column - Avatar video
        with gr.Column(scale=1, elem_classes=["avatar-container"]):
            gr.Markdown("### Dr. AI Avatar")
            video = gr.Video(
                value=AVATAR_VIDEO_PATH,
                autoplay=True,
                interactive=False,
                elem_classes=["video-container"]
            )

        # Right column - Chat interface
        with gr.Column(scale=2, elem_classes=["chat-container"]):
            chatbot = gr.Chatbot(
                label="Consultation Chat",
                bubble_full_width=False,
                show_copy_button=True,
                avatar_images=(
                    "https://i.imgur.com/8Km9tLL.png",  # User
                    "https://i.imgur.com/3Q3ZQ2u.png"   # Doctor
                )
            )

            with gr.Row(elem_classes=["input-row"]):
                user_input = gr.Textbox(
                    placeholder="Describe your symptoms...",
                    label="",
                    container=False,
                    autofocus=True,
                    max_lines=3
                )
                submit_btn = gr.Button("Send", variant="primary", elem_classes=["primary-btn"])

            with gr.Row():
                clear_btn = gr.Button("Clear Chat", variant="secondary")
                end_btn = gr.Button("End Consultation", variant="stop", elem_classes=["end-btn"])
                status = gr.Textbox(visible=False)

    # Event handlers
    def _respond(msg, hist):
        """Append the user turn + model reply to the history and clear the box.

        BUG FIX: the original lambda returned `msg` back into the textbox, so
        the typed message was never cleared after sending. Blank submissions
        are now ignored instead of being sent to the model.
        """
        if not msg or not msg.strip():
            return "", hist
        return "", hist + [(msg, generate_response(msg))]

    def _keep_video_playing():
        # Ensure the avatar video keeps playing after each exchange.
        return gr.update(autoplay=True)

    submit_btn.click(
        fn=_respond,
        inputs=[user_input, chatbot],
        outputs=[user_input, chatbot],
        queue=True
    ).then(
        _keep_video_playing,
        outputs=video
    )

    # Also send on Enter so the textbox behaves like a chat input.
    user_input.submit(
        fn=_respond,
        inputs=[user_input, chatbot],
        outputs=[user_input, chatbot],
        queue=True
    ).then(
        _keep_video_playing,
        outputs=video
    )

    clear_btn.click(lambda: [], None, chatbot)
    end_btn.click(
        fn=end_call,
        inputs=appointment_id,
        outputs=status
    )

# Launch: bind every interface on port 7860, no public Gradio share link.
demo.launch(
    server_name="0.0.0.0",
    server_port=7860,
    share=False,
    favicon_path="https://i.imgur.com/3Q3ZQ2u.png",  # NOTE(review): Gradio documents favicon_path as a local file path — confirm a URL is accepted
    prevent_thread_lock=True  # return immediately instead of blocking the main thread
)