Rajeev-86 committed
Commit · 1e58902
1 Parent(s): e9dee47

made changes in models
Files changed:
- Dockerfile +3 -3
- image_handler.py +32 -3
Dockerfile
CHANGED
@@ -14,9 +14,9 @@ EXPOSE 8080
 
 # 3. Installing gdown, downloading models and immediately uninstalling it for a smaller image
 RUN pip install --no-cache-dir gdown && \
-    gdown
-    gdown
-    gdown
+    gdown 1yZfaeCagywUGfObCyhwNvd9hYVdXH84t -O model_store/UNET.mar && \
+    gdown 1632np236SU0ZFM9Li4EbcwpNyLPcio9A -O model_store/R-UNET.mar && \
+    gdown 1LyE6FQzY6wQI0nWwj3s1O29jseiF5-xo -O model_store/A-R-UNET.mar && \
     pip uninstall gdown -y
 
 USER model-server
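The three gdown calls above fetch TorchServe model archives into model_store/, and the image exposes port 8080, TorchServe's default inference port. As a hedged usage sketch (assuming the server registers the models under their archive names such as UNET, and that noisy.png / denoised.png are placeholder file names not taken from this repo), a client could call the predictions endpoint like this:

import requests

# Sketch of calling the container's TorchServe inference API. Assumptions (not in
# the source): the models are registered under their archive names (UNET, R-UNET,
# A-R-UNET), port 8080 is published on localhost, and noisy.png / denoised.png are
# placeholder file names.
with open("noisy.png", "rb") as f:
    image_bytes = f.read()

# The handler reads data[0]["body"], so the raw image bytes go in the request body.
resp = requests.post("http://localhost:8080/predictions/UNET", data=image_bytes)
resp.raise_for_status()

# postprocess() returns PNG bytes, so the response body can be written straight to disk.
with open("denoised.png", "wb") as f:
    f.write(resp.content)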
image_handler.py
CHANGED
@@ -3,18 +3,29 @@ import torch
 import torchvision.transforms as transforms
 from PIL import Image
 import io
+import time
+import logging
+import torch.nn.functional as F
+
+logger = logging.getLogger(__name__)
 
 class ImageHandler(BaseHandler):
     def __init__(self):
         super(ImageHandler, self).__init__()
         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
         self.transform = transforms.Compose([transforms.ToTensor()])
+        self.input_tensor_for_metrics = None
+        self.start_time = 0
 
     def preprocess(self, data):
-
+        self.start_time = time.time()
+
         image_bytes = data[0].get("body")
         image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
+        width, height = image.size
+        logger.info(f"DATA_QUALITY: resolution={width}x{height}, format={image.format}")
         tensor = self.transform(image).unsqueeze(0).to(self.device)
+        self.input_tensor_for_metrics = tensor.clone().detach()
         return tensor
 
     def inference(self, data, *args, **kwargs):
@@ -23,9 +34,27 @@ class ImageHandler(BaseHandler):
         return output
 
     def postprocess(self, data):
-
-
+        output_batched = data
+        input_batched = self.input_tensor_for_metrics
+        output_tensor = output_batched.squeeze(0).cpu().clamp(0, 1)
+        input_tensor = input_batched.squeeze(0).cpu()
+        output_tensor_resized = output_tensor
+        if output_tensor.shape != input_tensor.shape:
+            output_tensor_resized = F.interpolate(
+                output_tensor.unsqueeze(0),
+                size=input_tensor.shape[-2:],
+                mode='bilinear',
+                align_corners=False
+            ).squeeze(0)
+
+        pixel_difference = torch.mean(torch.abs(input_tensor - output_tensor_resized)).item()
+        logger.info(f"OUTPUT_QUALITY: denoising_intensity={pixel_difference:.4f}")
 
+        end_time = time.time()
+        latency_ms = (end_time - self.start_time) * 1000
+        logger.info(f"OPERATIONAL_HEALTH: total_latency={latency_ms:.2f}ms")
+
+        output_image = transforms.ToPILImage()(output_tensor)
         buf = io.BytesIO()
         output_image.save(buf, format="PNG")
         return [buf.getvalue()]
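To sanity-check the new DATA_QUALITY / OUTPUT_QUALITY / OPERATIONAL_HEALTH log lines without a running TorchServe container, a minimal local sketch could drive the handler directly. Assumptions (not in the source): image_handler.py and the torchserve package providing BaseHandler are importable locally, and a torch.nn.Identity module stands in for the real model.

import io
import logging

import torch
from PIL import Image

# Assumption: image_handler.py is importable and the torchserve package that
# provides BaseHandler is installed locally.
from image_handler import ImageHandler

logging.basicConfig(level=logging.INFO)

handler = ImageHandler()
handler.model = torch.nn.Identity().to(handler.device)  # placeholder stand-in for the real model

# Build a fake request: one small in-memory PNG, shaped like TorchServe's data[0]["body"].
img = Image.new("RGB", (64, 64), color=(128, 128, 128))
buf = io.BytesIO()
img.save(buf, format="PNG")
request = [{"body": buf.getvalue()}]

tensor = handler.preprocess(request)        # logs DATA_QUALITY and starts the latency timer
with torch.no_grad():
    output = handler.model(tensor)          # identity forward pass instead of handler.inference()
png_bytes = handler.postprocess(output)[0]  # logs OUTPUT_QUALITY and OPERATIONAL_HEALTH
print(f"returned {len(png_bytes)} PNG bytes")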