diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..cb5324d3ca5736e8871b838b7ce067b15689aa17 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +*.jpg filter=lfs diff=lfs merge=lfs -text +*.so filter=lfs diff=lfs merge=lfs -text diff --git a/README.md b/README.md index c6e593ad0d1397ff639f157cb30adbb6a1aaeb88..071ea09ee215d2867f1430d9ec5603eb15c898e2 100644 --- a/README.md +++ b/README.md @@ -5,10 +5,8 @@ colorFrom: pink colorTo: green sdk: gradio sdk_version: 5.49.1 -app_file: app.py +app_file: demo/gradio_demo_with_sam3.py pinned: false license: apache-2.0 short_description: Complex text label dection using SAM3 with VLM-FO1 --- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/demo/gradio_demo.py b/demo/gradio_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..462ed435347dd66a969f38ba99aec540b27e5a55 --- /dev/null +++ b/demo/gradio_demo.py @@ -0,0 +1,374 @@ +import gradio as gr +from PIL import Image, ImageDraw, ImageFont +import re +import numpy as np +from skimage.measure import label, regionprops +from skimage.morphology import binary_dilation, disk +from detect_tools.upn import UPNWrapper +from vlm_fo1.model.builder import load_pretrained_model +from vlm_fo1.mm_utils import ( + prepare_inputs, + extract_predictions_to_indexes, +) +from vlm_fo1.task_templates import * +import torch +import os +from copy import deepcopy + + +TASK_TYPES = { + "OD/REC": OD_template, + "ODCounting": OD_Counting_template, + "Region_OCR": "Please provide the ocr results of these regions in the image.", + "Brief_Region_Caption": "Provide a brief description for these regions in the image.", + "Detailed_Region_Caption": "Provide a detailed description for these regions in the image.", + "Viusal_Region_Reasoning": Viusal_Region_Reasoning_template, + "OD_All": OD_All_template, + "Grounding": Grounding_template, +} + +EXAMPLES = [ + ["demo_image.jpg", TASK_TYPES["OD/REC"].format("orange, apple"), "OD/REC"], + ["demo_image_01.jpg", TASK_TYPES["ODCounting"].format("airplane with only one propeller"), "ODCounting"], + ["demo_image_02.jpg", TASK_TYPES["OD/REC"].format("the ball closest to the bear"), "OD/REC"], + ["demo_image_03.jpg", TASK_TYPES["OD_All"].format(""), "OD_All"], + ["demo_image_03.jpg", TASK_TYPES["Viusal_Region_Reasoning"].format("What's the brand of this computer?"), "Viusal_Region_Reasoning"], +] + + +def get_valid_examples(): + valid_examples = [] + demo_dir = os.path.dirname(os.path.abspath(__file__)) + for example in EXAMPLES: + img_path = example[0] + full_path = os.path.join(demo_dir, img_path) + if os.path.exists(full_path): + valid_examples.append([ + full_path, + example[1], + example[2] + ]) + elif os.path.exists(img_path): + valid_examples.append([ + img_path, + example[1], + example[2] + ]) + return valid_examples + + +def detect_model(image, threshold=0.3): + proposals = upn_model.inference(image) + filtered_proposals = upn_model.filter(proposals, min_score=threshold) + return filtered_proposals['original_xyxy_boxes'][0][:100] + + +def multimodal_model(image, bboxes, text): + if '' in text: + print(text) + parts = [part.replace('\\n', '\n') for part in re.split(rf'()', text) if part.strip()] + print(parts) + content = [] + for part in parts: + if part == '': + 
content.append({"type": "image_url", "image_url": {"url": image}}) + else: + content.append({"type": "text", "text": part}) + else: + content = [{ + "type": "image_url", + "image_url": { + "url": image + } + }, { + "type": "text", + "text": text + }] + + messages = [ + { + "role": "user", + "content": content, + "bbox_list": bboxes + } + ] + generation_kwargs = prepare_inputs(model_path, model, image_processors, tokenizer, messages, + max_tokens=4096, top_p=0.05, temperature=0.0, do_sample=False) + with torch.inference_mode(): + output_ids = model.generate(**generation_kwargs) + outputs = tokenizer.decode(output_ids[0, generation_kwargs['inputs'].shape[1]:]).strip() + print("========output========\n", outputs) + + if '' in outputs: + prediction_dict = extract_predictions_to_indexes(outputs) + else: + match_pattern = r"" + matches = re.findall(match_pattern, outputs) + prediction_dict = {f"": {int(m)} for m in matches} + + ans_bbox_json = [] + ans_bbox_list = [] + for k, v in prediction_dict.items(): + for box_index in v: + box_index = int(box_index) + if box_index < len(bboxes): + current_bbox = bboxes[box_index] + ans_bbox_json.append({ + "region_index": f"", + "xmin": current_bbox[0], + "ymin": current_bbox[1], + "xmax": current_bbox[2], + "ymax": current_bbox[3], + "label": k + }) + ans_bbox_list.append(current_bbox) + + return outputs, ans_bbox_json, ans_bbox_list + + +def draw_bboxes(image, bboxes, labels=None): + image = image.copy() + draw = ImageDraw.Draw(image) + + for bbox in bboxes: + draw.rectangle(bbox, outline="red", width=3) + return image + + +def extract_bbox_and_original_image(edited_image): + """Extract original image and bounding boxes from ImageEditor output""" + if edited_image is None: + return None, [] + + if isinstance(edited_image, dict): + original_image = edited_image.get("background") + bbox_list = [] + + if original_image is None: + return None, [] + + if edited_image.get("layers") is None or len(edited_image.get("layers", [])) == 0: + return original_image, [] + + try: + drawing_layer = edited_image["layers"][0] + alpha_channel = drawing_layer.getchannel('A') + alpha_np = np.array(alpha_channel) + + binary_mask = alpha_np > 0 + + structuring_element = disk(5) + dilated_mask = binary_dilation(binary_mask, structuring_element) + + labeled_image = label(dilated_mask) + regions = regionprops(labeled_image) + + for prop in regions: + y_min, x_min, y_max, x_max = prop.bbox + bbox_list.append((x_min, y_min, x_max, y_max)) + except Exception as e: + print(f"Error extracting bboxes from layers: {e}") + return original_image, [] + + return original_image, bbox_list + elif isinstance(edited_image, Image.Image): + return edited_image, [] + else: + print(f"Unknown input type: {type(edited_image)}") + return None, [] + + +def process(image, example_image, prompt, threshold): + image, bbox_list = extract_bbox_and_original_image(image) + + if example_image is not None: + image = example_image + + if image is None: + error_msg = "Error: Please upload an image or select a valid example." 
+ print(f"Error: image is None, original input type: {type(image)}") + return None, None, error_msg, [] + + try: + image = image.convert('RGB') + except Exception as e: + error_msg = f"Error: Cannot process image - {str(e)}" + return None, None, error_msg, [] + + if len(bbox_list) == 0: + bboxes = detect_model(image, threshold) + else: + bboxes = bbox_list + for idx in range(len(bboxes)): + prompt += f'' + + ans, ans_bbox_json, ans_bbox_list = multimodal_model(image, bboxes, prompt) + + image_with_detection = draw_bboxes(image, bboxes) + + annotated_bboxes = [] + if len(ans_bbox_json) > 0: + for item in ans_bbox_json: + annotated_bboxes.append( + ((int(item['xmin']), int(item['ymin']), int(item['xmax']), int(item['ymax'])), item['label']) + ) + annotated_image = (image, annotated_bboxes) + + return annotated_image, image_with_detection, ans, ans_bbox_json + + +def update_btn(is_processing): + if is_processing: + return gr.update(value="Processing...", interactive=False) + else: + return gr.update(value="Submit", interactive=True) + + +def launch_demo(): + with gr.Blocks() as demo: + gr.Markdown("# 🚀 VLM-FO1 Demo") + gr.Markdown(""" + ### 📋 Instructions + + **Step 1: Prepare Your Image** + - Upload an image using the image editor below + - *Optional:* Draw circular regions with the red brush to specify areas of interest + - *Alternative:* If not drawing regions, the detection model will automatically identify regions + + **Step 2: Configure Your Task** + - Select a task template from the dropdown menu + - Replace `[WRITE YOUR INPUT HERE]` with your target objects or query + - *Example:* For detecting "person" and "dog", replace with: `person, dog` + - *Or:* Write your own custom prompt + + **Step 3: Fine-tune Detection** *(Optional)* + - Adjust the detection threshold slider to control sensitivity + + **Step 4: Generate Results** + - Click the **Submit** button to process your request + - View the detection results and model outputs below + + 🔗 [GitHub Repository](https://github.com/om-ai-lab/VLM-FO1) + """) + + with gr.Row(): + with gr.Column(): + img_input_draw = gr.ImageEditor( + label="Image Input", + image_mode="RGBA", + type="pil", + sources=['upload'], + brush=gr.Brush(colors=["#FF0000"], color_mode="fixed", default_size=2), + interactive=True + ) + + gr.Markdown("### Prompt & Parameters") + + def set_prompt_from_template(selected_task): + return gr.update(value=TASK_TYPES[selected_task].format("[WRITE YOUR INPUT HERE]")) + + def load_example(prompt_input, task_type_input, hidden_image_box): + cached_image = deepcopy(hidden_image_box) + w, h = cached_image.size + + transparent_layer = Image.new('RGBA', (w, h), (0, 0, 0, 0)) + + new_editor_value = { + "background": cached_image, + "layers": [transparent_layer], + "composite": None + } + + return new_editor_value, prompt_input, task_type_input + + def reset_hidden_image_box(): + return gr.update(value=None) + + task_type_input = gr.Dropdown( + choices=list(TASK_TYPES.keys()), + value="OD/REC", + label="Prompt Templates", + info="Select the prompt template for the task, or write your own prompt." 
+ ) + + prompt_input = gr.Textbox( + label="Task Prompt", + value=TASK_TYPES["OD/REC"].format("[WRITE YOUR INPUT HERE]"), + lines=2, + ) + + task_type_input.select( + set_prompt_from_template, + inputs=task_type_input, + outputs=prompt_input + ) + + hidden_image_box = gr.Image(label="Image", type="pil", image_mode="RGBA", visible=False) + + threshold_input = gr.Slider(minimum=0.0, maximum=1.0, value=0.3, step=0.01, label="Detection Model Threshold") + submit_btn = gr.Button("Submit", variant="primary") + + valid_examples = get_valid_examples() + if len(valid_examples) > 0: + gr.Markdown("### Examples") + gr.Markdown("Click on the examples below to quickly load images and corresponding prompts:") + + examples_data = [[example[0], example[1], example[2]] for index, example in enumerate(valid_examples)] + + examples = gr.Examples( + examples=examples_data, + inputs=[hidden_image_box, prompt_input, task_type_input], + label="Click to load example", + examples_per_page=5 + ) + + examples.load_input_event.then( + fn=load_example, + inputs=[prompt_input, task_type_input, hidden_image_box], + outputs=[img_input_draw, prompt_input, task_type_input] + ) + + img_input_draw.upload( + fn=reset_hidden_image_box, + outputs=[hidden_image_box] + ) + + with gr.Column(): + with gr.Accordion("Detection Result", open=True): + image_with_detection = gr.Image(label="Detection Result", height=200) + + image_output = gr.AnnotatedImage(label="VLM-FO1 Result", height=400) + + result_output = gr.Textbox(label="VLM-FO1 Output", lines=5) + ans_bbox_json = gr.JSON(label="Extracted Detection Output") + + submit_btn.click( + update_btn, + inputs=[gr.State(True)], + outputs=[submit_btn], + queue=False + ).then( + process, + inputs=[img_input_draw, hidden_image_box, prompt_input, threshold_input], + outputs=[image_output, image_with_detection, result_output, ans_bbox_json], + queue=True + ).then( + update_btn, + inputs=[gr.State(False)], + outputs=[submit_btn], + queue=False + ) + + return demo + +if __name__ == "__main__": + model_path = './resources/VLM-FO1_Qwen2.5-VL-3B-v01' + upn_ckpt_path = "./resources/upn_large.pth" + tokenizer, model, image_processors = load_pretrained_model( + model_path=model_path, + device="cuda:0", + ) + upn_model = UPNWrapper(upn_ckpt_path) + + demo = launch_demo() + demo.launch(server_name="0.0.0.0", share=False, server_port=8000, debug=False) diff --git a/demo/gradio_demo_with_sam3.py b/demo/gradio_demo_with_sam3.py new file mode 100644 index 0000000000000000000000000000000000000000..750212bcbaa0bc27e4c2f59e601d066dc2b46d5a --- /dev/null +++ b/demo/gradio_demo_with_sam3.py @@ -0,0 +1,323 @@ +import gradio as gr +import spaces +from PIL import Image, ImageDraw, ImageFont +import re +import numpy as np +from skimage.measure import label, regionprops +from skimage.morphology import binary_dilation, disk +from sam3.model_builder import build_sam3_image_model +from sam3.model.sam3_image_processor import Sam3Processor +from sam3.visualization_utils import plot_bbox, plot_mask, COLORS +import matplotlib.pyplot as plt + +from vlm_fo1.model.builder import load_pretrained_model +from vlm_fo1.mm_utils import ( + prepare_inputs, + extract_predictions_to_indexes, +) +from vlm_fo1.task_templates import * +import torch +import os +from copy import deepcopy + + +EXAMPLES = [ + ["demo/sam3_examples/00000-72.jpg","airplane with letter AE on its body"], + ["demo/sam3_examples/00000-32.jpg","the lying cat which is not black"], + ["demo/sam3_examples/00000-22.jpg","person wearing a black top"], + 
["demo/sam3_examples/000000378453.jpg", "zebra inside the mud puddle"], + ["demo/sam3_examples/00000-242.jpg", "person who is holding a book"], +] + + +def get_valid_examples(): + valid_examples = [] + demo_dir = os.path.dirname(os.path.abspath(__file__)) + for example in EXAMPLES: + img_path = example[0] + full_path = os.path.join(demo_dir, img_path) + if os.path.exists(full_path): + valid_examples.append([ + full_path, + example[1], + example[2] + ]) + elif os.path.exists(img_path): + valid_examples.append([ + img_path, + example[1], + example[2] + ]) + return valid_examples + + +def detect_model(image, text, threshold=0.3): + inference_state = sam3_processor.set_image(image) + output = sam3_processor.set_text_prompt(state=inference_state, prompt=text) + boxes, scores, masks = output["boxes"], output["scores"], output["masks"] + sorted_indices = torch.argsort(scores, descending=True) + boxes = boxes[sorted_indices][:100, :] + scores = scores[sorted_indices][:100] + masks = masks[sorted_indices][:100] + # If the highest confidence score is greater than 0.5, filter with 0.3 threshold + if len(scores) > 0 and scores[0] > 0.75: + conf_threshold = 0.3 + + else: + conf_threshold = 0.05 + mask = scores > conf_threshold + boxes = boxes[mask] + scores = scores[mask] + masks = masks[mask] + # Keep boxes with score > 0.8 in a separate list + high_conf_mask = scores > 0.8 + high_conf_boxes = boxes[high_conf_mask] + + print("========boxes========\n", boxes.tolist()) + print("========scores========\n", scores.tolist()) + print("========high_conf_boxes (>0.8)========\n", high_conf_boxes.tolist()) + + output = { + "boxes": boxes, + "scores": scores, + "masks": masks, + } + return boxes.tolist(), scores.tolist(), high_conf_boxes.tolist(), masks.tolist(), output + + +def multimodal_model(image, bboxes, scores, text): + if len(bboxes) == 0: + return None, {}, [] + + if '' in text: + print(text) + parts = [part.replace('\\n', '\n') for part in re.split(rf'()', text) if part.strip()] + print(parts) + content = [] + for part in parts: + if part == '': + content.append({"type": "image_url", "image_url": {"url": image}}) + else: + content.append({"type": "text", "text": part}) + else: + content = [{ + "type": "image_url", + "image_url": { + "url": image + } + }, { + "type": "text", + "text": text + }] + + messages = [ + { + "role": "user", + "content": content, + "bbox_list": bboxes + } + ] + generation_kwargs = prepare_inputs(model_path, model, image_processors, tokenizer, messages, + max_tokens=4096, top_p=0.05, temperature=0.0, do_sample=False, image_size=1024) + with torch.inference_mode(): + output_ids = model.generate(**generation_kwargs) + outputs = tokenizer.decode(output_ids[0, generation_kwargs['inputs'].shape[1]:]).strip() + print("========output========\n", outputs) + + if '' in outputs: + prediction_dict = extract_predictions_to_indexes(outputs) + else: + match_pattern = r"" + matches = re.findall(match_pattern, outputs) + prediction_dict = {f"": {int(m)} for m in matches} + + ans_bbox_json = [] + ans_bbox_list = [] + for k, v in prediction_dict.items(): + for box_index in v: + box_index = int(box_index) + if box_index < len(bboxes): + current_bbox = bboxes[box_index] + current_score = scores[box_index] + ans_bbox_json.append({ + "region_index": f"", + "xmin": current_bbox[0], + "ymin": current_bbox[1], + "xmax": current_bbox[2], + "ymax": current_bbox[3], + "label": k, + "score": current_score + }) + ans_bbox_list.append(current_bbox) + + return outputs, ans_bbox_json, ans_bbox_list + + +def 
draw_bboxes(img, results): + fig, ax = plt.subplots(figsize=(12, 8)) + # fig.subplots_adjust(0, 0, 1, 1) + ax.imshow(img) + nb_objects = len(results["scores"]) + print(f"found {nb_objects} object(s)") + for i in range(nb_objects): + color = COLORS[i % len(COLORS)] + plot_mask(results["masks"][i].squeeze(0).cpu(), color=color) + w, h = img.size + prob = results["scores"][i].item() + plot_bbox( + h, + w, + results["boxes"][i].cpu(), + text=f"(id={i}, {prob=:.2f})", + box_format="XYXY", + color=color, + relative_coords=False, + ) + ax.axis("off") + fig.tight_layout(pad=0) + + # Convert matplotlib figure to PIL Image + fig.canvas.draw() + buf = fig.canvas.buffer_rgba() + pil_img = Image.frombytes('RGBA', fig.canvas.get_width_height(), buf) + plt.close(fig) + + return pil_img + + +@spaces.GPU +def process(image, prompt, threshold=0): + if image is None: + error_msg = "Error: Please upload an image or select a valid example." + print(f"Error: image is None, original input type: {type(image)}") + return None, None, error_msg, [] + + try: + image = image.convert('RGB') + except Exception as e: + error_msg = f"Error: Cannot process image - {str(e)}" + return None, None, error_msg, [] + + bboxes, scores, high_conf_bboxes, masks, output = detect_model(image, prompt, threshold) + + fo1_prompt = OD_Counting_template.format(prompt) + ans, ans_bbox_json, ans_bbox_list = multimodal_model(image, bboxes, scores, fo1_prompt) + + detection_image = draw_bboxes(image, output) + + annotated_bboxes = [] + if len(ans_bbox_json) > 0: + img_width, img_height = image.size + for item in ans_bbox_json: + xmin = max(0, min(img_width, int(item['xmin']))) + ymin = max(0, min(img_height, int(item['ymin']))) + xmax = max(0, min(img_width, int(item['xmax']))) + ymax = max(0, min(img_height, int(item['ymax']))) + annotated_bboxes.append( + ((xmin, ymin, xmax, ymax), item['label']) + ) + annotated_image = (image, annotated_bboxes) + + return annotated_image, detection_image, ans_bbox_json + + +def update_btn(is_processing): + if is_processing: + return gr.update(value="Processing...", interactive=False) + else: + return gr.update(value="Submit", interactive=True) + + +def launch_demo(): + with gr.Blocks() as demo: + gr.Markdown("# 🚀 VLM-FO1 + SAM3 Demo") + gr.Markdown(""" + ### 📋 Instructions + Combine the SAM3 detection results with the VLM-FO1 model to enchance its dectection and segmentation performance on complex label tasks. + + **How it works** + 1. Upload or pick an example image. + 2. Describe the target object in natural language. + 3. Hit **Submit** to run SAM3 + VLM-FO1. + + **Outputs** + - `SAM3 Result`: raw detections with masks/bboxes generated by SAM3. + - `VLM-FO1 Result`: filtered detections plus labels generated by VLM-FO1. + + **Tips** + - One prompt at a time is currently supported. Multiple label prompts will be supported soon. + - Use the examples below to quickly explore the pipeline. 
+ """) + + gr.Markdown(""" + ### 🔗 References + - [SAM3](https://github.com/facebookresearch/sam3) + - [VLM-FO1](https://github.com/om-ai-lab/VLM-FO1) + """) + + with gr.Row(): + with gr.Column(): + img_input_draw = gr.Image( + label="Image Input", + type="pil", + sources=['upload'], + ) + + gr.Markdown("### Prompt") + + prompt_input = gr.Textbox( + label="Label Prompt", + lines=2, + ) + + submit_btn = gr.Button("Submit", variant="primary") + + + examples = gr.Examples( + examples=EXAMPLES, + inputs=[img_input_draw, prompt_input], + label="Click to load example", + examples_per_page=5 + ) + + with gr.Column(): + with gr.Accordion("SAM3 Result", open=True): + image_output_detection = gr.Image(label="SAM3 Result", height=400) + + image_output = gr.AnnotatedImage(label="VLM-FO1 Result", height=400) + + ans_bbox_json = gr.JSON(label="Extracted Detection Output") + + submit_btn.click( + update_btn, + inputs=[gr.State(True)], + outputs=[submit_btn], + queue=False + ).then( + process, + inputs=[img_input_draw, prompt_input], + outputs=[image_output, image_output_detection, ans_bbox_json], + queue=True + ).then( + update_btn, + inputs=[gr.State(False)], + outputs=[submit_btn], + queue=False + ) + + return demo + +if __name__ == "__main__": + # model_path = './resources/VLM-FO1_Qwen2.5-VL-3B-v01' + # sam3_model_path = './resources/sam3/sam3.pt' + + model_path = 'omlab/VLM-FO1_Qwen2.5-VL-3B-v01' + tokenizer, model, image_processors = load_pretrained_model( + model_path=model_path, + device="cuda:0", + ) + sam3_model = build_sam3_image_model(device="cuda:0") + sam3_processor = Sam3Processor(sam3_model, confidence_threshold=0.0, device="cuda:0") + + demo = launch_demo() + demo.launch() diff --git a/demo/sam3_examples/init.py b/demo/sam3_examples/init.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/detect_tools/sam3/.gitignore b/detect_tools/sam3/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..fcda494a8d3bbf84810b8937101d85bfe68fb38d --- /dev/null +++ b/detect_tools/sam3/.gitignore @@ -0,0 +1,153 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints +*-Copy*.ipynb + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# PyCharm +.idea/ + +# VS Code +.vscode/ +*.code-workspace + +# Model weights and checkpoints +*.pth +*.pt +*.bin +*.ckpt +*.safetensors +weights/ +checkpoints/ +sam3_logs/ + +# Data files +*.h5 +*.hdf5 +*.pkl +*.pickle +*.npy +*.npz + +# Logs +logs/ +runs/ +tensorboard/ + +# OS specific +.DS_Store +Thumbs.db + +# BPE vocabulary files +*.bpe +*.vocab diff --git a/detect_tools/sam3/CODE_OF_CONDUCT.md b/detect_tools/sam3/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..3232ed665566ec047ce55a929db1581dbda266a1 --- /dev/null +++ b/detect_tools/sam3/CODE_OF_CONDUCT.md @@ -0,0 +1,80 @@ +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to make participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic +address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a +professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. 
+ +## Scope + +This Code of Conduct applies within all project spaces, and it also applies when +an individual is representing the project or its community in public spaces. +Examples of representing a project or community include using an official +project e-mail address, posting via an official social media account, or acting +as an appointed representative at an online or offline event. Representation of +a project may be further defined and clarified by project maintainers. + +This Code of Conduct also applies outside the project spaces when there is a +reasonable belief that an individual's behavior may have a negative impact on +the project or its community. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at . All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/detect_tools/sam3/CONTRIBUTING.md b/detect_tools/sam3/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..8d0d9290ad3ce04efa27839264f7892e65924dfc --- /dev/null +++ b/detect_tools/sam3/CONTRIBUTING.md @@ -0,0 +1,30 @@ +# Contributing to sam3 +We want to make contributing to this project as easy and transparent as +possible. + +## Pull Requests +We actively welcome your pull requests. + +1. Fork the repo and create your branch from `main`. +2. If you've added code that should be tested, add tests. +3. If you've changed APIs, update the documentation. +4. Make sure your code lints. +5. If you haven't already, complete the Contributor License Agreement ("CLA"). + +## Contributor License Agreement ("CLA") +In order to accept your pull request, we need you to submit a CLA. You only need +to do this once to work on any of Facebook's open source projects. + +Complete your CLA here: + +## Issues +We use GitHub issues to track public bugs. Please ensure your description is +clear and has sufficient instructions to be able to reproduce the issue. + +Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe +disclosure of security bugs. In those cases, please go through the process +outlined on that page and do not file a public issue. + +## License +By contributing to sam3, you agree that your contributions will be licensed +under the LICENSE file in the root directory of this source tree. 
diff --git a/detect_tools/sam3/LICENSE b/detect_tools/sam3/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..00030caa37d1d1714b2eb0d7f55c50c5805ed4ce --- /dev/null +++ b/detect_tools/sam3/LICENSE @@ -0,0 +1,61 @@ +SAM License +Last Updated: November 19, 2025 + +“Agreement” means the terms and conditions for use, reproduction, distribution and modification of the SAM Materials set forth herein. + + +“SAM Materials” means, collectively, Documentation and the models, software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code, and other elements of the foregoing distributed by Meta and made available under this Agreement. + +“Documentation” means the specifications, manuals and documentation accompanying +SAM Materials distributed by Meta. + + +“Licensee” or “you” means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity’s behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf. + + +“Meta” or “we” means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) or Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland). + + +“Sanctions” means any economic or trade sanctions or restrictions administered or enforced by the United States (including the Office of Foreign Assets Control of the U.S. Department of the Treasury (“OFAC”), the U.S. Department of State and the U.S. Department of Commerce), the United Nations, the European Union, or the United Kingdom. + + +“Trade Controls” means any of the following: Sanctions and applicable export and import controls. + +By using or distributing any portion or element of the SAM Materials, you agree to be bound by this Agreement. + + +1. License Rights and Redistribution. + + +a. Grant of Rights. You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Meta’s intellectual property or other rights owned by Meta embodied in the SAM Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the SAM Materials. + +b. Redistribution and Use. +i. Distribution of SAM Materials, and any derivative works thereof, are subject to the terms of this Agreement. If you distribute or make the SAM Materials, or any derivative works thereof, available to a third party, you may only do so under the terms of this Agreement and you shall provide a copy of this Agreement with any such SAM Materials. + + +ii. If you submit for publication the results of research you perform on, using, or otherwise in connection with SAM Materials, you must acknowledge the use of SAM Materials in your publication. + + +iii. Your use of the SAM Materials must comply with applicable laws and regulations, including Trade Control Laws and applicable privacy and data protection laws. +iv. Your use of the SAM Materials will not involve or encourage others to reverse engineer, decompile or discover the underlying components of the SAM Materials. +v. You are not the target of Trade Controls and your use of SAM Materials must comply with Trade Controls. 
You agree not to use, or permit others to use, SAM Materials for any activities subject to the International Traffic in Arms Regulations (ITAR) or end uses prohibited by Trade Controls, including those related to military or warfare purposes, nuclear industries or applications, espionage, or the development or use of guns or illegal weapons. +2. User Support. Your use of the SAM Materials is done at your own discretion; Meta does not process any information nor provide any service in relation to such use. Meta is under no obligation to provide any support services for the SAM Materials. Any support provided is “as is”, “with all faults”, and without warranty of any kind. + + +3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE SAM MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN “AS IS” BASIS, WITHOUT WARRANTIES OF ANY KIND, AND META DISCLAIMS ALL WARRANTIES OF ANY KIND, BOTH EXPRESS AND IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE SAM MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE SAM MATERIALS AND ANY OUTPUT AND RESULTS. + +4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY DIRECT OR INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING. + +5. Intellectual Property. + + +a. Subject to Meta’s ownership of SAM Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the SAM Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications. + +b. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the SAM Materials, outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the SAM Materials. + +6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the SAM Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the SAM Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement. + +7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement. + + +8. 
Modifications and Amendments. Meta may modify this Agreement from time to time; provided that they are similar in spirit to the current version of the Agreement, but may differ in detail to address new problems or concerns. All such changes will be effective immediately. Your continued use of the SAM Materials after any modification to this Agreement constitutes your agreement to such modification. Except as provided in this Agreement, no modification or addition to any provision of this Agreement will be binding unless it is in writing and signed by an authorized representative of both you and Meta. diff --git a/detect_tools/sam3/MANIFEST.in b/detect_tools/sam3/MANIFEST.in new file mode 100644 index 0000000000000000000000000000000000000000..6daf92e44ba1bc4ab5ce60fbf4bf79089df17473 --- /dev/null +++ b/detect_tools/sam3/MANIFEST.in @@ -0,0 +1,6 @@ +include LICENSE +include README.md +recursive-include examples *.py +recursive-include examples *.ipynb +recursive-include examples *.md +recursive-include tests *.py diff --git a/detect_tools/sam3/README.md b/detect_tools/sam3/README.md new file mode 100644 index 0000000000000000000000000000000000000000..709f872e77b17d3a975bc7fe2d9907702b2a62eb --- /dev/null +++ b/detect_tools/sam3/README.md @@ -0,0 +1,387 @@ +# SAM 3: Segment Anything with Concepts + +Meta Superintelligence Labs + +[Nicolas Carion](https://www.nicolascarion.com/)\*, +[Laura Gustafson](https://scholar.google.com/citations?user=c8IpF9gAAAAJ&hl=en)\*, +[Yuan-Ting Hu](https://scholar.google.com/citations?user=E8DVVYQAAAAJ&hl=en)\*, +[Shoubhik Debnath](https://scholar.google.com/citations?user=fb6FOfsAAAAJ&hl=en)\*, +[Ronghang Hu](https://ronghanghu.com/)\*, +[Didac Suris](https://www.didacsuris.com/)\*, +[Chaitanya Ryali](https://scholar.google.com/citations?user=4LWx24UAAAAJ&hl=en)\*, +[Kalyan Vasudev Alwala](https://scholar.google.co.in/citations?user=m34oaWEAAAAJ&hl=en)\*, +[Haitham Khedr](https://hkhedr.com/)\*, Andrew Huang, +[Jie Lei](https://jayleicn.github.io/), +[Tengyu Ma](https://scholar.google.com/citations?user=VeTSl0wAAAAJ&hl=en), +[Baishan Guo](https://scholar.google.com/citations?user=BC5wDu8AAAAJ&hl=en), +Arpit Kalla, [Markus Marks](https://damaggu.github.io/), +[Joseph Greer](https://scholar.google.com/citations?user=guL96CkAAAAJ&hl=en), +Meng Wang, [Peize Sun](https://peizesun.github.io/), +[Roman Rädle](https://scholar.google.com/citations?user=Tpt57v0AAAAJ&hl=en), +[Triantafyllos Afouras](https://www.robots.ox.ac.uk/~afourast/), +[Effrosyni Mavroudi](https://scholar.google.com/citations?user=vYRzGGEAAAAJ&hl=en), +[Katherine Xu](https://k8xu.github.io/)°, +[Tsung-Han Wu](https://patrickthwu.com/)°, +[Yu Zhou](https://yu-bryan-zhou.github.io/)°, +[Liliane Momeni](https://scholar.google.com/citations?user=Lb-KgVYAAAAJ&hl=en)°, +[Rishi Hazra](https://rishihazra.github.io/)°, +[Shuangrui Ding](https://mark12ding.github.io/)°, +[Sagar Vaze](https://sgvaze.github.io/)°, +[Francois Porcher](https://scholar.google.com/citations?user=LgHZ8hUAAAAJ&hl=en)°, +[Feng Li](https://fengli-ust.github.io/)°, +[Siyuan Li](https://siyuanliii.github.io/)°, +[Aishwarya Kamath](https://ashkamath.github.io/)°, +[Ho Kei Cheng](https://hkchengrex.com/)°, +[Piotr Dollar](https://pdollar.github.io/)†, +[Nikhila Ravi](https://nikhilaravi.com/)†, +[Kate Saenko](https://ai.bu.edu/ksaenko.html)†, +[Pengchuan Zhang](https://pzzhang.github.io/pzzhang/)†, +[Christoph Feichtenhofer](https://feichtenhofer.github.io/)† + +\* core contributor, ° intern, † project lead, order is random within groups + 
+[[`Paper`](https://ai.meta.com/research/publications/sam-3-segment-anything-with-concepts/)] +[[`Project`](https://ai.meta.com/sam3)] +[[`Demo`](https://segment-anything.com/)] +[[`Blog`](https://ai.meta.com/blog/segment-anything-model-3/)] + + +![SAM 3 architecture](assets/model_diagram.png?raw=true) SAM 3 is a unified foundation model for promptable segmentation in images and videos. It can detect, segment, and track objects using text or visual prompts such as points, boxes, and masks. Compared to its predecessor [SAM 2](https://github.com/facebookresearch/sam2), SAM 3 introduces the ability to exhaustively segment all instances of an open-vocabulary concept specified by a short text phrase or exemplars. Unlike prior work, SAM 3 can handle a vastly larger set of open-vocabulary prompts. It achieves 75-80% of human performance on our new [SA-CO benchmark](https://github.com/facebookresearch/sam3/edit/main_readme/README.md#sa-co-dataset) which contains 270K unique concepts, over 50 times more than existing benchmarks. + +This breakthrough is driven by an innovative data engine that has automatically annotated over 4 million unique concepts, creating the largest high-quality open-vocabulary segmentation dataset to date. In addition, SAM 3 introduces a new model architecture featuring a presence token that improves discrimination between closely related text prompts (e.g., “a player in white” vs. “a player in red”), as well as a decoupled detector–tracker design that minimizes task interference and scales efficiently with data. + +


+ +## Installation + +### Prerequisites + +- Python 3.12 or higher +- PyTorch 2.7 or higher +- CUDA-compatible GPU with CUDA 12.6 or higher + +1. **Create a new Conda environment:** + +```bash +conda create -n sam3 python=3.12 +conda deactivate +conda activate sam3 +``` + +2. **Install PyTorch with CUDA support:** + +```bash +pip install torch==2.7.0 torchvision torchaudio --index-url https://download.pytorch.org/whl/cu126 +``` + +3. **Clone the repository and install the package:** + +```bash +git clone https://github.com/facebookresearch/sam3.git +cd sam3 +pip install -e . +``` + +4. **Install additional dependencies for example notebooks or development:** + +```bash +# For running example notebooks +pip install -e ".[notebooks]" + +# For development +pip install -e ".[train,dev]" +``` + +## Getting Started + +⚠️ Before using SAM 3, please request access to the checkpoints on the SAM 3 +Hugging Face [repo](https://huggingface.co/facebook/sam3). Once accepted, you +need to be authenticated to download the checkpoints. You can do this by running +the following [steps](https://huggingface.co/docs/huggingface_hub/en/quick-start#authentication) +(e.g. `hf auth login` after generating an access token.) + +### Basic Usage + +```python +import torch +#################################### For Image #################################### +from PIL import Image +from sam3.model_builder import build_sam3_image_model +from sam3.model.sam3_image_processor import Sam3Processor +# Load the model +model = build_sam3_image_model() +processor = Sam3Processor(model) +# Load an image +image = Image.open("") +inference_state = processor.set_image(image) +# Prompt the model with text +output = processor.set_text_prompt(state=inference_state, prompt="") + +# Get the masks, bounding boxes, and scores +masks, boxes, scores = output["masks"], output["boxes"], output["scores"] + +#################################### For Video #################################### + +from sam3.model_builder import build_sam3_video_predictor + +video_predictor = build_sam3_video_predictor() +video_path = "" # a JPEG folder or an MP4 video file +# Start a session +response = video_predictor.handle_request( + request=dict( + type="start_session", + resource_path=video_path, + ) +) +response = video_predictor.handle_request( + request=dict( + type="add_prompt", + session_id=response["session_id"], + frame_index=0, # Arbitrary frame index + text="", + ) +) +output = response["outputs"] +``` + +## Examples + +The `examples` directory contains notebooks demonstrating how to use SAM3 with +various types of prompts: + +- [`sam3_image_predictor_example.ipynb`](examples/sam3_image_predictor_example.ipynb) + : Demonstrates how to prompt SAM 3 with text and visual box prompts on images. +- [`sam3_video_predictor_example.ipynb`](examples/sam3_video_predictor_example.ipynb) + : Demonstrates how to prompt SAM 3 with text prompts on videos, and doing + further interactive refinements with points. +- [`sam3_image_batched_inference.ipynb`](examples/sam3_image_batched_inference.ipynb) + : Demonstrates how to run batched inference with SAM 3 on images. +- [`sam3_agent.ipynb`](examples/sam3_agent.ipynb): Demonsterates the use of SAM + 3 Agent to segment complex text prompt on images. +- [`saco_gold_silver_vis_example.ipynb`](examples/saco_gold_silver_vis_example.ipynb) + : Shows a few examples from SA-Co image evaluation set. +- [`saco_veval_vis_example.ipynb`](examples/saco_veval_vis_example.ipynb) : + Shows a few examples from SA-Co video evaluation set. 
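Complementing the Basic Usage snippet above, the sketch below shows one way to rank and filter the detections returned by `Sam3Processor.set_text_prompt`, mirroring the score-based filtering used in `demo/gradio_demo_with_sam3.py`. It assumes `output` is the dictionary produced in the Basic Usage example; the 0.5 threshold and the top-100 cap are illustrative choices, not library defaults.

```python
import torch

# `output` is the dict returned by processor.set_text_prompt(...) above; it holds
# per-detection tensors under the keys "boxes", "scores", and "masks".
boxes, scores, masks = output["boxes"], output["scores"], output["masks"]

# Rank detections by confidence and keep at most the top 100 (as the demo script does).
order = torch.argsort(scores, descending=True)[:100]
boxes, scores, masks = boxes[order], scores[order], masks[order]

# Drop low-confidence detections; 0.5 is an illustrative threshold, tune it per prompt.
keep = scores > 0.5
boxes, scores, masks = boxes[keep], scores[keep], masks[keep]

print(f"kept {len(scores)} detection(s)")
for box, score in zip(boxes.tolist(), scores.tolist()):
    xmin, ymin, xmax, ymax = box  # boxes appear to be absolute XYXY pixel coordinates, as used by the demo
    print(f"({xmin:.1f}, {ymin:.1f}, {xmax:.1f}, {ymax:.1f})  score={score:.2f}")
```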
+ +There are additional notebooks in the examples directory that demonstrate how to +use SAM 3 for interactive instance segmentation in images and videos (SAM 1/2 +tasks), or as a tool for an MLLM, and how to run evaluations on the SA-Co +dataset. + +To run the Jupyter notebook examples: + +```bash +# Make sure you have the notebooks dependencies installed +pip install -e ".[notebooks]" + +# Start Jupyter notebook +jupyter notebook examples/sam3_image_predictor_example.ipynb +``` + +## Model + +SAM 3 consists of a detector and a tracker that share a vision encoder. It has 848M parameters. The +detector is a DETR-based model conditioned on text, geometry, and image +exemplars. The tracker inherits the SAM 2 transformer encoder-decoder +architecture, supporting video segmentation and interactive refinement. + +## Image Results + +
The first three metric columns report instance segmentation (mask) results; the remaining five report box detection results.

| Model | LVIS cgF1 (mask) | LVIS AP (mask) | SA-Co/Gold cgF1 (mask) | LVIS cgF1 (box) | LVIS AP (box) | COCO AP (box) | COCO-O APo (box) | SA-Co/Gold cgF1 (box) |
|---|---|---|---|---|---|---|---|---|
| Human | - | - | 72.8 | - | - | - | - | 74.0 |
| OWLv2* | 29.3 | 43.4 | 24.6 | 30.2 | 45.5 | 46.1 | 23.9 | 24.5 |
| DINO-X | - | 38.5 | 21.3 | - | 52.4 | 56.0 | - | 22.5 |
| Gemini 2.5 | 13.4 | - | 13.0 | 16.1 | - | - | - | 14.4 |
| SAM 3 | 37.2 | 48.5 | 54.1 | 40.6 | 53.6 | 56.4 | 55.7 | 55.7 |

\* Partially trained on LVIS; APo refers to COCO-O accuracy.

+ +
+ +## Video Results + +
| Model | SA-V test cgF1 | SA-V test pHOTA | YT-Temporal-1B test cgF1 | YT-Temporal-1B test pHOTA | SmartGlasses test cgF1 | SmartGlasses test pHOTA | LVVIS test mAP | BURST test HOTA |
|---|---|---|---|---|---|---|---|---|
| Human | 53.1 | 70.5 | 71.2 | 78.4 | 58.5 | 72.3 | - | - |
| SAM 3 | 30.3 | 58.0 | 50.8 | 69.9 | 36.4 | 63.6 | 36.3 | 44.5 |
+
+ +## SA-Co Dataset + +We release 2 image benchmarks, [SA-Co/Gold](scripts/eval/gold/README.md) and +[SA-Co/Silver](scripts/eval/silver/README.md), and a video benchmark +[SA-Co/VEval](scripts/eval/veval/README.md). The datasets contain images (or videos) with annotated noun phrases. Each image/video and noun phrase pair is annotated with instance masks and unique IDs of each object matching the phrase. Phrases that have no matching objects (negative prompts) have no masks, shown in red font in the figure. See the linked READMEs for more details on how to download and run evaluations on the datasets. + +* HuggingFace host: [SA-Co/Gold](https://huggingface.co/datasets/facebook/SACo-Gold), [SA-Co/Silver](https://huggingface.co/datasets/facebook/SACo-Silver) and [SA-Co/VEval](https://huggingface.co/datasets/facebook/SACo-VEval) +* Roboflow host: [SA-Co/Gold](https://universe.roboflow.com/sa-co-gold), [SA-Co/Silver](https://universe.roboflow.com/sa-co-silver) and [SA-Co/VEval](https://universe.roboflow.com/sa-co-veval) + +![SA-Co dataset](assets/sa_co_dataset.jpg?raw=true) + +## Development + +To set up the development environment: + +```bash +pip install -e ".[dev,train]" +``` + +To format the code: + +```bash +ufmt format . +``` + +## Contributing + +See [contributing](CONTRIBUTING.md) and the +[code of conduct](CODE_OF_CONDUCT.md). + +## License + +This project is licensed under the SAM License - see the [LICENSE](LICENSE) file +for details. + +## Acknowledgements + +We would like to thank the following people for their contributions to the SAM 3 project: Alex He, Alexander Kirillov, +Alyssa Newcomb, Ana Paula Kirschner Mofarrej, Andrea Madotto, Andrew Westbury, Ashley Gabriel, Azita Shokpour, +Ben Samples, Bernie Huang, Carleigh Wood, Ching-Feng Yeh, Christian Puhrsch, Claudette Ward, Daniel Bolya, +Daniel Li, Facundo Figueroa, Fazila Vhora, George Orlin, Hanzi Mao, Helen Klein, Hu Xu, Ida Cheng, Jake Kinney, +Jiale Zhi, Jo Sampaio, Joel Schlosser, Justin Johnson, Kai Brown, Karen Bergan, Karla Martucci, Kenny Lehmann, +Maddie Mintz, Mallika Malhotra, Matt Ward, Michelle Chan, Michelle Restrepo, Miranda Hartley, Muhammad Maaz, +Nisha Deo, Peter Park, Phillip Thomas, Raghu Nayani, Rene Martinez Doehner, Robbie Adkins, Ross Girshik, Sasha +Mitts, Shashank Jain, Spencer Whitehead, Ty Toledano, Valentin Gabeur, Vincent Cho, Vivian Lee, William Ngan, +Xuehai He, Yael Yungster, Ziqi Pang, Ziyi Dou, Zoe Quake. + + diff --git a/detect_tools/sam3/README_TRAIN.md b/detect_tools/sam3/README_TRAIN.md new file mode 100644 index 0000000000000000000000000000000000000000..01904c27418b9c8765b933a9c08699a7d67b13a1 --- /dev/null +++ b/detect_tools/sam3/README_TRAIN.md @@ -0,0 +1,190 @@ +# Training + +This repository supports finetuning SAM3 models on custom datasets in multi-node setup or local execution. The training script is located at `sam3/train.py` and uses Hydra configuration management to handle complex training setups. + + +## Installation + +```bash +cd sam3 +pip install -e ".[train]" +``` + +### Training Script Usage + +The main training script is located at `sam3/train.py`. It uses Hydra configuration management to handle complex training setups. 
+ +#### Basic Usage + +```bash +# Example: Train on Roboflow dataset +python sam3/train/train.py -c configs/roboflow_v100/roboflow_v100_full_ft_100_images.yaml +# Example: Train on ODinW13 dataset +python sam3/train/train.py -c configs/odinw13/odinw_text_only_train.yaml +``` +Follow [`Roboflow 100-VL`](https://github.com/roboflow/rf100-vl/) to download the roboflow 100-vl datasets. Follow [`GLIP`](https://github.com/microsoft/GLIP) to download the ODinW datasets. The data folder should be organized as follows, and put your roboflow_vl_100_root and odinw_data_root in the job configs. +``` +roboflow_vl_100_root: + 13-lkc01 + train + valid + test + 2024-frc + actions + ... +odinw_data_root: + AerialMaritimeDrone + large + train + valid + test + Aquarium + ... +``` + +#### Command Line Arguments + +The training script supports several command line arguments: + +```bash +python sam3/train/train.py \ + -c CONFIG_NAME \ + [--use-cluster 0|1] \ + [--partition PARTITION_NAME] \ + [--account ACCOUNT_NAME] \ + [--qos QOS_NAME] \ + [--num-gpus NUM_GPUS] \ + [--num-nodes NUM_NODES] +``` + +**Arguments:** +- `-c, --config`: **Required.** Path to the configuration file (e.g., `sam3/train/configs/roboflow_v100_full_ft_100_images.yaml`) +- `--use-cluster`: Whether to launch on a cluster (0: local, 1: cluster). Default: uses config setting +- `--partition`: SLURM partition name for cluster execution +- `--account`: SLURM account name for cluster execution +- `--qos`: SLURM QOS (Quality of Service) setting +- `--num-gpus`: Number of GPUs per node. Default: uses config setting +- `--num-nodes`: Number of nodes for distributed training. Default: uses config setting + +#### Local Training Examples + +```bash +# Single GPU training +python sam3/train/train.py -c configs/roboflow_v100/roboflow_v100_full_ft_100_images.yaml --use-cluster 0 --num-gpus 1 + +# Multi-GPU training on a single node +python sam3/train/train.py -c configs/roboflow_v100/roboflow_v100_full_ft_100_images.yaml --use-cluster 0 --num-gpus 4 + +# Force local execution even if config specifies GPUs +python sam3/train/train.py -c configs/roboflow_v100/roboflow_v100_full_ft_100_images.yaml --use-cluster 0 +``` + +#### Cluster Training Examples + +```bash +# Basic cluster training with default settings from config +python sam3/train/train.py -c configs/roboflow_v100/roboflow_v100_full_ft_100_images.yaml --use-cluster 1 + +# Cluster training with specific SLURM settings +python sam3/train/train.py -c configs/roboflow_v100/roboflow_v100_full_ft_100_images.yaml \ + --use-cluster 1 \ + --partition gpu_partition \ + --account my_account \ + --qos high_priority \ + --num-gpus 8 \ + --num-nodes 2 +``` + +### Configuration Files + +Training configurations are stored in `sam3/train/configs/`. 
The configuration files use Hydra's YAML format and support: + +- **Dataset Configuration**: Data paths, transforms, and loading parameters +- **Model Configuration**: Architecture settings, checkpoint paths, and model parameters +- **Training Configuration**: Batch sizes, learning rates, optimization settings +- **Launcher Configuration**: Distributed training and cluster settings +- **Logging Configuration**: TensorBoard, experiment tracking, and output directories + +#### Key Configuration Sections + +```yaml +# Paths to datasets and checkpoints +paths: + bpe_path: /path/to/bpe/file + dataset_root: /path/to/dataset + experiment_log_dir: /path/to/logs + +# Launcher settings for local/cluster execution +launcher: + num_nodes: 1 + gpus_per_node: 2 + experiment_log_dir: ${paths.experiment_log_dir} + +# Cluster execution settings +submitit: + use_cluster: True + timeout_hour: 72 + cpus_per_task: 10 + partition: null + account: null +``` + +### Monitoring Training + +The training script automatically sets up logging and saves outputs to the experiment directory: + +```bash +# Logs are saved to the experiment_log_dir specified in config +experiment_log_dir/ +├── config.yaml # Original configuration +├── config_resolved.yaml # Resolved configuration with all variables expanded +├── checkpoints/ # Model checkpoints (if skip_checkpointing=False) +├── tensorboard/ # TensorBoard logs +├── logs/ # Text logs +└── submitit_logs/ # Cluster job logs (if using cluster) +``` + +You can monitor training progress using TensorBoard: + +```bash +tensorboard --logdir /path/to/experiment_log_dir/tensorboard +``` + +### Job Arrays for Dataset Sweeps + +The Roboflow and ODinW configuration supports job arrays for training multiple models on different datasets: + +This feature is specifically enabled via, +```yaml +submitit: + job_array: + num_tasks: 100 + task_index: 0 +``` + +The configuration includes a complete list of 100 Roboflow supercategories, and the `submitit.job_array.task_index` automatically selects which dataset to use based on the array job index. + +```bash +# Submit job array to train on different Roboflow datasets +# The job array index selects which dataset from all_roboflow_supercategories +python sam3/train/train.py -c configs/roboflow_v100/roboflow_v100_full_ft_100_images.yaml \ + --use-cluster 1 +``` + +### Reproduce ODinW13 10-shot results +Running the following job will give the results on the ODinW13 seed 300, see `odinw_train.train_file: fewshot_train_shot10_seed300` in the config file. +```bash +# Example: Train on ODinW13 dataset +python sam3/train/train.py -c configs/odinw13/odinw_text_only_train.yaml +``` +Change `odinw_train.train_file` to `fewshot_train_shot10_seed30` and `fewshot_train_shot10_seed3` to get the results for the other two seeds. Final results are aggregated from the three seeds. Notice that a small number of jobs may diverge during training, in which case we just use the last checkpoint's result before it diverges. + + +### Eval Script Usage +With a similar setup as the training config, the training script `sam3/train.py` can also be used for evaluation, too, when setting `trainer.mode = val` in the job config. Run the following job will give the results on the zero-shot results on RF100-VL and ODinW13 datasets. 
+```bash +# Example: Evaluate on Roboflow dataset +python sam3/train/train.py -c configs/roboflow_v100/roboflow_v100_eval.yaml +# Example: Evaluate on ODinW13 dataset +python sam3/train/train.py -c configs/odinw13/odinw_text_only.yaml +``` diff --git a/detect_tools/sam3/assets/init.py b/detect_tools/sam3/assets/init.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/detect_tools/sam3/pyproject.toml b/detect_tools/sam3/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..e4998de0f969cd0302f96c38cb355bcc0d4287c6 --- /dev/null +++ b/detect_tools/sam3/pyproject.toml @@ -0,0 +1,131 @@ +[build-system] +requires = ["setuptools>=61", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "sam3" +dynamic = ["version"] +description = "SAM3 (Segment Anything Model 3) implementation" +readme = "README.md" +requires-python = ">=3.8" +license = {file = "LICENSE"} +authors = [ + {name = "Meta AI Research"} +] +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Topic :: Scientific/Engineering :: Artificial Intelligence", +] +dependencies = [ + "timm>=1.0.17", + "numpy==1.26", + "tqdm", + "ftfy==6.1.1", + "regex", + "iopath>=0.1.10", + "typing_extensions", + "huggingface_hub", +] + +[project.optional-dependencies] +dev = [ + "pytest", + "pytest-cov", + "black==24.2.0", + "ufmt==2.8.0", + "ruff-api==0.1.0", + "usort==1.0.2", + "gitpython==3.1.31", + "yt-dlp", + "pandas", + "opencv-python", + "pycocotools", + "numba", + "python-rapidjson", +] +notebooks = [ + "matplotlib", + "jupyter", + "notebook", + "ipywidgets", + "ipycanvas", + "ipympl", + "pycocotools", + "decord", + "opencv-python", + "einops", + "scikit-image", + "scikit-learn", +] +train = [ + "hydra-core", + "submitit", + "tensorboard", + "zstandard", + "scipy", + "torchmetrics", + "fvcore", + "fairscale", + "scikit-image", + "scikit-learn", +] + +[project.urls] +"Homepage" = "https://github.com/facebookresearch/sam3" +"Bug Tracker" = "https://github.com/facebookresearch/sam3/issues" + +[tool.setuptools] +packages = ["sam3", "sam3.model"] + +[tool.setuptools.dynamic] +version = {attr = "sam3.__version__"} + +[tool.black] +line-length = 88 +target-version = ['py38', 'py39', 'py310', 'py311', 'py312'] +include = '\.pyi?$' + +[tool.isort] +profile = "black" +multi_line_output = 3 + +[tool.usort] +first_party_detection = false + +[tool.ufmt] +formatter = "ruff-api" + +[tool.mypy] +python_version = "3.12" +warn_return_any = true +warn_unused_configs = true +disallow_untyped_defs = true +disallow_incomplete_defs = true + +[[tool.mypy.overrides]] +module = [ + "torch.*", + "torchvision.*", + "timm.*", + "numpy.*", + "PIL.*", + "tqdm.*", + "ftfy.*", + "regex.*", + "iopath.*", +] +ignore_missing_imports = true + +[tool.pytest.ini_options] +testpaths = ["tests"] +python_files = "test_*.py" +python_classes = "Test*" +python_functions = "test_*" diff --git a/detect_tools/sam3/sam3/__init__.py b/detect_tools/sam3/sam3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..14270a6fdbd0198784934a0399f7a19b7405c65a --- /dev/null +++ b/detect_tools/sam3/sam3/__init__.py @@ -0,0 
+1,7 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +from .model_builder import build_sam3_image_model + +__version__ = "0.1.0" + +__all__ = ["build_sam3_image_model"] diff --git a/detect_tools/sam3/sam3/logger.py b/detect_tools/sam3/sam3/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..db9c0a61b76292e941804b233cc6c184b641158a --- /dev/null +++ b/detect_tools/sam3/sam3/logger.py @@ -0,0 +1,54 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved +import logging +import os + +LOG_LEVELS = { + "DEBUG": logging.DEBUG, + "INFO": logging.INFO, + "WARNING": logging.WARNING, + "ERROR": logging.ERROR, + "CRITICAL": logging.CRITICAL, +} + + +class ColoredFormatter(logging.Formatter): + """A command line formatter with different colors for each level.""" + + def __init__(self): + super().__init__() + reset = "\033[0m" + colors = { + logging.DEBUG: f"{reset}\033[36m", # cyan, + logging.INFO: f"{reset}\033[32m", # green + logging.WARNING: f"{reset}\033[33m", # yellow + logging.ERROR: f"{reset}\033[31m", # red + logging.CRITICAL: f"{reset}\033[35m", # magenta + } + fmt_str = "{color}%(levelname)s %(asctime)s %(process)d %(filename)s:%(lineno)4d:{reset} %(message)s" + self.formatters = { + level: logging.Formatter(fmt_str.format(color=color, reset=reset)) + for level, color in colors.items() + } + self.default_formatter = self.formatters[logging.INFO] + + def format(self, record): + formatter = self.formatters.get(record.levelno, self.default_formatter) + return formatter.format(record) + + +def get_logger(name, level=logging.INFO): + """A command line logger.""" + if "LOG_LEVEL" in os.environ: + level = os.environ["LOG_LEVEL"].upper() + assert ( + level in LOG_LEVELS + ), f"Invalid LOG_LEVEL: {level}, must be one of {list(LOG_LEVELS.keys())}" + level = LOG_LEVELS[level] + logger = logging.getLogger(name) + logger.setLevel(level) + logger.propagate = False + ch = logging.StreamHandler() + ch.setLevel(level) + ch.setFormatter(ColoredFormatter()) + logger.addHandler(ch) + return logger diff --git a/detect_tools/sam3/sam3/model/__init__.py b/detect_tools/sam3/sam3/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..46d37d2aaef52ec7de01999516a2b8c3e1fa4986 --- /dev/null +++ b/detect_tools/sam3/sam3/model/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved diff --git a/detect_tools/sam3/sam3/model/act_ckpt_utils.py b/detect_tools/sam3/sam3/model/act_ckpt_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c935cfc9ccdba8de3790422dacf8d77cbb2fbe8c --- /dev/null +++ b/detect_tools/sam3/sam3/model/act_ckpt_utils.py @@ -0,0 +1,114 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import inspect +from functools import wraps +from typing import Callable, TypeVar, Union + +import torch +import torch.nn as nn +import torch.utils.checkpoint as checkpoint +from torch.utils._pytree import tree_map_only + +# Type variables for better type hinting +T = TypeVar("T") +Module = TypeVar("Module", bound=nn.Module) + + +def activation_ckpt_wrapper(module: Union[nn.Module, Callable]) -> Callable: + """ + Wraps a given module to enable or disable activation checkpointing. + + Activation checkpointing (gradient checkpointing) trades compute for memory by + recomputing intermediate activations during the backward pass instead of storing + them in memory during the forward pass. 
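Note (editor, not part of this diff): for readers unfamiliar with gradient checkpointing, the wrapper below builds on plain `torch.utils.checkpoint`. A minimal standalone sketch of that underlying behavior is shown here; the toy `block` and tensor shapes are purely illustrative.

```python
# Illustrative sketch only: torch.utils.checkpoint recomputes activations during backward
# instead of storing them in the forward pass, trading compute for memory.
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint

block = nn.Sequential(nn.Linear(64, 256), nn.ReLU(), nn.Linear(256, 64))
x = torch.randn(8, 64, requires_grad=True)

# Intermediate activations inside `block` are not kept; they are recomputed on backward.
y = checkpoint.checkpoint(block, x, use_reentrant=False)
y.sum().backward()
print(x.grad.shape)  # torch.Size([8, 64])
```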
+ + When activation checkpointing is enabled, the wrapper expects only keyword arguments, + and it maps these to positional arguments based on the module's signature. + + Args: + module: The module or function to wrap with activation checkpointing + + Returns: + A wrapped callable that supports activation checkpointing + + Usage: + The returned wrapper function can be called with the same arguments as the + original module, with an additional `act_ckpt_enable` keyword argument to control + activation checkpointing and optional `use_reentrant` parameter. + + Example: + ```python + wrapped_module = activation_ckpt_wrapper(my_module) + output = wrapped_module(x=input_tensor, y=another_tensor, act_ckpt_enable=True) + ``` + """ + + @wraps(module) + def act_ckpt_wrapper( + *args, act_ckpt_enable: bool = True, use_reentrant: bool = False, **kwargs + ): + if act_ckpt_enable: + if len(args) > 0: + raise ValueError( + "This wrapper expects keyword arguments only when `act_ckpt_enable=True`" + ) + # Get the signature of the target function/module + callable_fn = module.forward if isinstance(module, nn.Module) else module + sig = inspect.signature(callable_fn) + # Create a mapping of parameter names to their default values + param_defaults = { + name: param.default for name, param in sig.parameters.items() + } + args = [] + for p_name in param_defaults.keys(): + if p_name in kwargs: + args.append(kwargs.pop(p_name)) + elif param_defaults[p_name] is not inspect.Parameter.empty: + # Set arg to default value if it's not in kwargs. Useful for primitive types or args that default to None + args.append(param_defaults[p_name]) + elif ( + sig.parameters[p_name].kind is not inspect.Parameter.VAR_KEYWORD + ): # Skip **kwargs parameter + raise ValueError(f"Missing positional argument: {p_name}") + + # Scan remaining kwargs for torch.Tensor + remaining_keys = list(kwargs.keys()) + for key in remaining_keys: + if isinstance(kwargs[key], torch.Tensor): + # Remove the tensor from kwargs, assuming it's not required by the module. + # If it is required, the module's signature should be modified to accept it as a positional or keyword argument. + kwargs[key] = "_REMOVED_BY_ACT_CKPT_WRAPPER_" + + ret = checkpoint.checkpoint( + module, *args, use_reentrant=use_reentrant, **kwargs + ) + else: + ret = module(*args, **kwargs) + + return ret + + return act_ckpt_wrapper + + +def clone_output_wrapper(f: Callable[..., T]) -> Callable[..., T]: + """ + Clone the CUDA output tensors of a function to avoid in-place operations. + + This wrapper is useful when working with torch.compile to prevent errors + related to in-place operations on tensors. + + Args: + f: The function whose CUDA tensor outputs should be cloned + + Returns: + A wrapped function that clones any CUDA tensor outputs + """ + + @wraps(f) + def wrapped(*args, **kwargs): + outputs = f(*args, **kwargs) + return tree_map_only( + torch.Tensor, lambda t: t.clone() if t.is_cuda else t, outputs + ) + + return wrapped diff --git a/detect_tools/sam3/sam3/model/box_ops.py b/detect_tools/sam3/sam3/model/box_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..f88e4adff393ca8ef8100319fdb95ced6a9b4f6e --- /dev/null +++ b/detect_tools/sam3/sam3/model/box_ops.py @@ -0,0 +1,217 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved +""" +Utilities for bounding box manipulation and GIoU. 
+""" + +from typing import Tuple + +import torch + + +def box_cxcywh_to_xyxy(x): + x_c, y_c, w, h = x.unbind(-1) + b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)] + return torch.stack(b, dim=-1) + + +def box_cxcywh_to_xywh(x): + x_c, y_c, w, h = x.unbind(-1) + b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (w), (h)] + return torch.stack(b, dim=-1) + + +def box_xywh_to_xyxy(x): + x, y, w, h = x.unbind(-1) + b = [(x), (y), (x + w), (y + h)] + return torch.stack(b, dim=-1) + + +def box_xywh_to_cxcywh(x): + x, y, w, h = x.unbind(-1) + b = [(x + 0.5 * w), (y + 0.5 * h), (w), (h)] + return torch.stack(b, dim=-1) + + +def box_xyxy_to_xywh(x): + x, y, X, Y = x.unbind(-1) + b = [(x), (y), (X - x), (Y - y)] + return torch.stack(b, dim=-1) + + +def box_xyxy_to_cxcywh(x): + x0, y0, x1, y1 = x.unbind(-1) + b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)] + return torch.stack(b, dim=-1) + + +def box_area(boxes): + """ + Batched version of box area. Boxes should be in [x0, y0, x1, y1] format. + + Inputs: + - boxes: Tensor of shape (..., 4) + + Returns: + - areas: Tensor of shape (...,) + """ + x0, y0, x1, y1 = boxes.unbind(-1) + return (x1 - x0) * (y1 - y0) + + +def masks_to_boxes(masks): + """Compute the bounding boxes around the provided masks + + The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. + + Returns a [N, 4] tensors, with the boxes in xyxy format + """ + if masks.numel() == 0: + return torch.zeros((0, 4), device=masks.device) + + h, w = masks.shape[-2:] + + y = torch.arange(0, h, dtype=torch.float, device=masks.device) + x = torch.arange(0, w, dtype=torch.float, device=masks.device) + y, x = torch.meshgrid(y, x) + + x_mask = masks * x.unsqueeze(0) + x_max = x_mask.flatten(1).max(-1)[0] + 1 + x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] + + y_mask = masks * y.unsqueeze(0) + y_max = y_mask.flatten(1).max(-1)[0] + 1 + y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] + + boxes = torch.stack([x_min, y_min, x_max, y_max], 1) + # Invalidate boxes corresponding to empty masks. + boxes = boxes * masks.flatten(-2).any(-1) + return boxes + + +def box_iou(boxes1, boxes2): + """ + Batched version of box_iou. Boxes should be in [x0, y0, x1, y1] format. 
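Note (editor, not part of this diff): as a quick standalone illustration of the conversion helpers and `box_area` defined above, the sketch below round-trips a box between the `[x0, y0, x1, y1]` and `[cx, cy, w, h]` conventions. It assumes the `sam3` package from this diff is importable; the sample box values are made up.

```python
# Hypothetical usage of the box format helpers added in this diff.
import torch
from sam3.model.box_ops import box_area, box_cxcywh_to_xyxy, box_xyxy_to_cxcywh

boxes_xyxy = torch.tensor([[10.0, 20.0, 50.0, 80.0]])   # [x0, y0, x1, y1]
boxes_cxcywh = box_xyxy_to_cxcywh(boxes_xyxy)            # tensor([[30., 50., 40., 60.]])

# Converting back recovers the original corners, and the area is w * h.
assert torch.allclose(box_cxcywh_to_xyxy(boxes_cxcywh), boxes_xyxy)
print(box_area(boxes_xyxy))                              # tensor([2400.])
```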
+ + Inputs: + - boxes1: Tensor of shape (..., N, 4) + - boxes2: Tensor of shape (..., M, 4) + + Returns: + - iou, union: Tensors of shape (..., N, M) + """ + area1 = box_area(boxes1) + area2 = box_area(boxes2) + + # boxes1: (..., N, 4) -> (..., N, 1, 2) + # boxes2: (..., M, 4) -> (..., 1, M, 2) + lt = torch.max(boxes1[..., :, None, :2], boxes2[..., None, :, :2]) + rb = torch.min(boxes1[..., :, None, 2:], boxes2[..., None, :, 2:]) + + wh = (rb - lt).clamp(min=0) # (..., N, M, 2) + inter = wh[..., 0] * wh[..., 1] # (..., N, M) + + union = area1[..., None] + area2[..., None, :] - inter + + iou = inter / union + return iou, union + + +def generalized_box_iou(boxes1, boxes2): + """ + Batched version of Generalized IoU from https://giou.stanford.edu/ + + Boxes should be in [x0, y0, x1, y1] format + + Inputs: + - boxes1: Tensor of shape (..., N, 4) + - boxes2: Tensor of shape (..., M, 4) + + Returns: + - giou: Tensor of shape (..., N, M) + """ + iou, union = box_iou(boxes1, boxes2) + + # boxes1: (..., N, 4) -> (..., N, 1, 2) + # boxes2: (..., M, 4) -> (..., 1, M, 2) + lt = torch.min(boxes1[..., :, None, :2], boxes2[..., None, :, :2]) + rb = torch.max(boxes1[..., :, None, 2:], boxes2[..., None, :, 2:]) + + wh = (rb - lt).clamp(min=0) # (..., N, M, 2) + area = wh[..., 0] * wh[..., 1] # (..., N, M) + + return iou - (area - union) / area + + +@torch.jit.script +def fast_diag_generalized_box_iou(boxes1, boxes2): + assert len(boxes1) == len(boxes2) + box1_xy = boxes1[:, 2:] + box1_XY = boxes1[:, :2] + box2_xy = boxes2[:, 2:] + box2_XY = boxes2[:, :2] + # assert (box1_xy >= box1_XY).all() + # assert (box2_xy >= box2_XY).all() + area1 = (box1_xy - box1_XY).prod(-1) + area2 = (box2_xy - box2_XY).prod(-1) + + lt = torch.max(box1_XY, box2_XY) # [N,2] + lt2 = torch.min(box1_XY, box2_XY) + rb = torch.min(box1_xy, box2_xy) # [N,2] + rb2 = torch.max(box1_xy, box2_xy) + + inter = (rb - lt).clamp(min=0).prod(-1) + tot_area = (rb2 - lt2).clamp(min=0).prod(-1) + + union = area1 + area2 - inter + + iou = inter / union + + return iou - (tot_area - union) / tot_area + + +@torch.jit.script +def fast_diag_box_iou(boxes1, boxes2): + assert len(boxes1) == len(boxes2) + box1_xy = boxes1[:, 2:] + box1_XY = boxes1[:, :2] + box2_xy = boxes2[:, 2:] + box2_XY = boxes2[:, :2] + # assert (box1_xy >= box1_XY).all() + # assert (box2_xy >= box2_XY).all() + area1 = (box1_xy - box1_XY).prod(-1) + area2 = (box2_xy - box2_XY).prod(-1) + + lt = torch.max(box1_XY, box2_XY) # [N,2] + rb = torch.min(box1_xy, box2_xy) # [N,2] + + inter = (rb - lt).clamp(min=0).prod(-1) + + union = area1 + area2 - inter + + iou = inter / union + + return iou + + +def box_xywh_inter_union( + boxes1: torch.Tensor, boxes2: torch.Tensor +) -> Tuple[torch.Tensor, torch.Tensor]: + # Asuumes boxes in xywh format + assert boxes1.size(-1) == 4 and boxes2.size(-1) == 4 + boxes1 = box_xywh_to_xyxy(boxes1) + boxes2 = box_xywh_to_xyxy(boxes2) + box1_tl_xy = boxes1[..., :2] + box1_br_xy = boxes1[..., 2:] + box2_tl_xy = boxes2[..., :2] + box2_br_xy = boxes2[..., 2:] + area1 = (box1_br_xy - box1_tl_xy).prod(-1) + area2 = (box2_br_xy - box2_tl_xy).prod(-1) + + assert (area1 >= 0).all() and (area2 >= 0).all() + tl = torch.max(box1_tl_xy, box2_tl_xy) + br = torch.min(box1_br_xy, box2_br_xy) + + inter = (br - tl).clamp(min=0).prod(-1) + union = area1 + area2 - inter + + return inter, union diff --git a/detect_tools/sam3/sam3/model/data_misc.py b/detect_tools/sam3/sam3/model/data_misc.py new file mode 100644 index 
0000000000000000000000000000000000000000..4bbcf551b90c691cbb583055995815d62cd89bed --- /dev/null +++ b/detect_tools/sam3/sam3/model/data_misc.py @@ -0,0 +1,209 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved +""" +Misc functions, including distributed helpers. +""" + +import collections +import re + +from dataclasses import dataclass, field as field_ptr_behaviour, fields, is_dataclass +from typing import Any, get_args, get_origin, List, Mapping, Optional, Sequence, Union + +import torch + + +MyTensor = Union[torch.Tensor, List[Any]] + + +def interpolate( + input, size=None, scale_factor=None, mode="nearest", align_corners=None +): + # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor + """ + Equivalent to nn.functional.interpolate, but with support for empty channel sizes. + """ + if input.numel() > 0: + return torch.nn.functional.interpolate( + input, size, scale_factor, mode, align_corners + ) + + assert ( + input.shape[0] != 0 or input.shape[1] != 0 + ), "At least one of the two first dimensions must be non zero" + + if input.shape[1] == 0: + # Pytorch doesn't support null dimension on the channel dimension, so we transpose to fake a null batch dim + return torch.nn.functional.interpolate( + input.transpose(0, 1), size, scale_factor, mode, align_corners + ).transpose(0, 1) + + # empty batch dimension is now supported in pytorch + return torch.nn.functional.interpolate( + input, size, scale_factor, mode, align_corners + ) + + +@dataclass +class BatchedPointer: + stage_ids: MyTensor + stage_ids__type = torch.long + query_ids: MyTensor + query_ids__type = torch.long + object_ids: MyTensor + object_ids__type = torch.long + ptr_mask: MyTensor + ptr_mask__type = torch.bool + ptr_types: MyTensor + ptr_types__type = torch.long + + +@dataclass +class FindStage: + img_ids: MyTensor + img_ids__type = torch.long + text_ids: MyTensor + text_ids__type = torch.long + + input_boxes: MyTensor + input_boxes__type = torch.float + input_boxes_mask: MyTensor + input_boxes_mask__type = torch.bool + input_boxes_label: MyTensor + input_boxes_label__type = torch.long + + input_points: MyTensor + input_points__type = torch.float + input_points_mask: MyTensor + input_points_mask__type = torch.bool + + # We track the object ids referred to by this query. + # This is beneficial for tracking in videos without the need for pointers. 
+ object_ids: Optional[List[List]] = None # List of objects per query + + +@dataclass +class BatchedFindTarget: + # The number of boxes in each find query + num_boxes: MyTensor + num_boxes__type = torch.long + + # Target boxes in normalized CxCywh format + boxes: MyTensor + boxes__type = torch.float + # Target boxes in normalized CxCywh format but in padded representation + # as used in BinaryHungarianMatcherV2 (unlike the packed ones in `boxes`) + boxes_padded: MyTensor + boxes_padded__type = torch.float + + # For hybrid matching, we repeat the boxes + repeated_boxes: MyTensor + repeated_boxes__type = torch.float + + # Target Segmentation masks + segments: Optional[MyTensor] + segments__type = torch.bool + + # Target Semantic Segmentation masks + semantic_segments: Optional[MyTensor] + semantic_segments__type = torch.bool + + is_valid_segment: Optional[MyTensor] + is_valid_segment__type = torch.bool + + # Whether annotations are exhaustive for each query + is_exhaustive: MyTensor + is_exhaustive__type = torch.bool + + # The object id for each ground-truth box, in both packed and padded representations + object_ids: MyTensor + object_ids__type = torch.long + object_ids_padded: MyTensor + object_ids_padded__type = torch.long + + +@dataclass +class BatchedInferenceMetadata: + """All metadata required to post-process a find stage""" + + # Coco id that corresponds to the "image" for evaluation by the coco evaluator + coco_image_id: MyTensor + coco_image_id__type = torch.long + + # id in the original dataset, such that we can use the original evaluator + original_image_id: MyTensor + original_image_id__type = torch.long + + # Original category id (if we want to use the original evaluator) + original_category_id: MyTensor + original_category_id__type = torch.int + + # Size of the raw image (height, width) + original_size: MyTensor + original_size__type = torch.long + + # id of the object in the media (track_id for a video) + object_id: MyTensor + object_id__type = torch.long + + # index of the frame in the media (0 in the case of a single-frame media) + frame_index: MyTensor + frame_index__type = torch.long + + # Adding for relations inference + # get_text_input: List[Optional[str]] + + # Adding for TA conditional inference + is_conditioning_only: List[Optional[bool]] + + +@dataclass +class BatchedDatapoint: + img_batch: torch.Tensor + find_text_batch: List[str] + find_inputs: List[FindStage] + find_targets: List[BatchedFindTarget] + find_metadatas: List[BatchedInferenceMetadata] + raw_images: Optional[List[Any]] = None + + +def convert_my_tensors(obj): + def is_optional_field(field) -> bool: + return get_origin(field) is Union and type(None) in get_args(field) + + for field in fields(obj): + if is_dataclass(getattr(obj, field.name)): + convert_my_tensors(getattr(obj, field.name)) + continue + + field_type = field.type + if is_optional_field(field.type): + field_type = Union[get_args(field.type)[:-1]] # Get the Optional field type + + if field_type != MyTensor or getattr(obj, field.name) is None: + continue + + elif len(getattr(obj, field.name)) and isinstance( + getattr(obj, field.name)[0], torch.Tensor + ): + stack_dim = 0 + if field.name in [ + "input_boxes", + "input_boxes_label", + ]: + stack_dim = 1 + setattr( + obj, + field.name, + torch.stack(getattr(obj, field.name), dim=stack_dim).to( + getattr(obj, field.name + "__type") + ), + ) + else: + setattr( + obj, + field.name, + torch.as_tensor( + getattr(obj, field.name), dtype=getattr(obj, field.name + "__type") + ), + ) + return obj diff 
--git a/detect_tools/sam3/sam3/model/decoder.py b/detect_tools/sam3/sam3/model/decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..c8b1657ebd5706d901c84101012bb4c6e2fa4519 --- /dev/null +++ b/detect_tools/sam3/sam3/model/decoder.py @@ -0,0 +1,956 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved +""" +Transformer decoder. +Inspired from Pytorch's version, adds the pre-norm variant +""" + +from typing import Any, Dict, List, Optional + +import numpy as np + +import torch + +from sam3.sam.transformer import RoPEAttention + +from torch import nn, Tensor +from torchvision.ops.roi_align import RoIAlign + +from .act_ckpt_utils import activation_ckpt_wrapper + +from .box_ops import box_cxcywh_to_xyxy + +from .model_misc import ( + gen_sineembed_for_position, + get_activation_fn, + get_clones, + inverse_sigmoid, + MLP, +) + + +class TransformerDecoderLayer(nn.Module): + def __init__( + self, + activation: str, + d_model: int, + dim_feedforward: int, + dropout: float, + cross_attention: nn.Module, + n_heads: int, + use_text_cross_attention: bool = False, + ): + super().__init__() + + # cross attention + self.cross_attn = cross_attention + self.dropout1 = nn.Dropout(dropout) if dropout > 0 else nn.Identity() + self.norm1 = nn.LayerNorm(d_model) + + # cross attention text + self.use_text_cross_attention = use_text_cross_attention + if use_text_cross_attention: + self.ca_text = nn.MultiheadAttention(d_model, n_heads, dropout=dropout) + self.catext_dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity() + self.catext_norm = nn.LayerNorm(d_model) + + # self attention + self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout) + self.dropout2 = nn.Dropout(dropout) if dropout > 0 else nn.Identity() + self.norm2 = nn.LayerNorm(d_model) + + # ffn + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.activation = get_activation_fn(activation) + self.dropout3 = nn.Dropout(dropout) if dropout > 0 else nn.Identity() + self.linear2 = nn.Linear(dim_feedforward, d_model) + self.dropout4 = nn.Dropout(dropout) if dropout > 0 else nn.Identity() + self.norm3 = nn.LayerNorm(d_model) + + @staticmethod + def with_pos_embed(tensor, pos): + return tensor if pos is None else tensor + pos + + def forward_ffn(self, tgt): + with torch.amp.autocast(device_type="cuda", enabled=False): + tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout4(tgt2) + tgt = self.norm3(tgt) + return tgt + + def forward( + self, + # for tgt + tgt: Optional[Tensor], # nq, bs, d_model + tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos)) + tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. 
Sine(pos) + tgt_key_padding_mask: Optional[Tensor] = None, + tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4 + memory_text: Optional[Tensor] = None, # num_token, bs, d_model + text_attention_mask: Optional[Tensor] = None, # bs, num_token + # for memory + memory: Optional[Tensor] = None, # hw, bs, d_model + memory_key_padding_mask: Optional[Tensor] = None, + memory_level_start_index: Optional[Tensor] = None, # num_levels + memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2 + memory_pos: Optional[Tensor] = None, # pos for memory + # sa + self_attn_mask: Optional[Tensor] = None, # mask used for self-attention + cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention + # dac + dac=False, + dac_use_selfatt_ln=True, + presence_token=None, + # skip inside deformable attn + identity=0.0, + **kwargs, # additional kwargs for compatibility + ): + """ + Input: + - tgt/tgt_query_pos: nq, bs, d_model + - + """ + # self attention + if self.self_attn is not None: + if dac: + # we only apply self attention to the first half of the queries + assert tgt.shape[0] % 2 == 0 + num_o2o_queries = tgt.shape[0] // 2 + tgt_o2o = tgt[:num_o2o_queries] + tgt_query_pos_o2o = tgt_query_pos[:num_o2o_queries] + tgt_o2m = tgt[num_o2o_queries:] + else: + tgt_o2o = tgt + tgt_query_pos_o2o = tgt_query_pos + + if presence_token is not None: + tgt_o2o = torch.cat([presence_token, tgt_o2o], dim=0) + tgt_query_pos_o2o = torch.cat( + [torch.zeros_like(presence_token), tgt_query_pos_o2o], dim=0 + ) + tgt_query_pos = torch.cat( + [torch.zeros_like(presence_token), tgt_query_pos], dim=0 + ) + + q = k = self.with_pos_embed(tgt_o2o, tgt_query_pos_o2o) + tgt2 = self.self_attn(q, k, tgt_o2o, attn_mask=self_attn_mask)[0] + tgt_o2o = tgt_o2o + self.dropout2(tgt2) + if dac: + if not dac_use_selfatt_ln: + tgt_o2o = self.norm2(tgt_o2o) + tgt = torch.cat((tgt_o2o, tgt_o2m), dim=0) # Recombine + if dac_use_selfatt_ln: + tgt = self.norm2(tgt) + else: + tgt = tgt_o2o + tgt = self.norm2(tgt) + + if self.use_text_cross_attention: + tgt2 = self.ca_text( + self.with_pos_embed(tgt, tgt_query_pos), + memory_text, + memory_text, + key_padding_mask=text_attention_mask, + )[0] + tgt = tgt + self.catext_dropout(tgt2) + tgt = self.catext_norm(tgt) + + if presence_token is not None: + presence_token_mask = torch.zeros_like(cross_attn_mask[:, :1, :]) + cross_attn_mask = torch.cat( + [presence_token_mask, cross_attn_mask], dim=1 + ) # (bs*nheads, 1+nq, hw) + + # Cross attention to image + tgt2 = self.cross_attn( + query=self.with_pos_embed(tgt, tgt_query_pos), + key=self.with_pos_embed(memory, memory_pos), + value=memory, + attn_mask=cross_attn_mask, + key_padding_mask=( + memory_key_padding_mask.transpose(0, 1) + if memory_key_padding_mask is not None + else None + ), + )[0] + + tgt = tgt + self.dropout1(tgt2) + tgt = self.norm1(tgt) + + # ffn + tgt = self.forward_ffn(tgt) + + presence_token_out = None + if presence_token is not None: + presence_token_out = tgt[:1] + tgt = tgt[1:] + + return tgt, presence_token_out + + +class TransformerDecoder(nn.Module): + def __init__( + self, + d_model: int, + frozen: bool, + interaction_layer, + layer, + num_layers: int, + num_queries: int, + return_intermediate: bool, + box_refine: bool = False, + num_o2m_queries: int = 0, + dac: bool = False, + boxRPB: str = "none", + # Experimental: An object query for SAM 2 tasks + instance_query: bool = False, + # Defines the number of additional instance queries, + # 1 or 4 are the most likely for single vs multi mask support + 
num_instances: int = 1, # Irrelevant if instance_query is False + dac_use_selfatt_ln: bool = True, + use_act_checkpoint: bool = False, + compile_mode=None, + presence_token: bool = False, + clamp_presence_logits: bool = True, + clamp_presence_logit_max_val: float = 10.0, + use_normed_output_consistently: bool = True, + separate_box_head_instance: bool = False, + separate_norm_instance: bool = False, + resolution: Optional[int] = None, + stride: Optional[int] = None, + ): + super().__init__() + self.d_model = d_model + self.layers = get_clones(layer, num_layers) + self.fine_layers = ( + get_clones(interaction_layer, num_layers) + if interaction_layer is not None + else [None] * num_layers + ) + self.num_layers = num_layers + self.num_queries = num_queries + self.dac = dac + if dac: + self.num_o2m_queries = num_queries + tot_num_queries = num_queries + else: + self.num_o2m_queries = num_o2m_queries + tot_num_queries = num_queries + num_o2m_queries + self.norm = nn.LayerNorm(d_model) + self.return_intermediate = return_intermediate + self.bbox_embed = MLP(d_model, d_model, 4, 3) + self.query_embed = nn.Embedding(tot_num_queries, d_model) + self.instance_query_embed = None + self.instance_query_reference_points = None + self.use_instance_query = instance_query + self.num_instances = num_instances + self.use_normed_output_consistently = use_normed_output_consistently + + self.instance_norm = nn.LayerNorm(d_model) if separate_norm_instance else None + self.instance_bbox_embed = None + if separate_box_head_instance: + self.instance_bbox_embed = MLP(d_model, d_model, 4, 3) + if instance_query: + self.instance_query_embed = nn.Embedding(num_instances, d_model) + self.box_refine = box_refine + if box_refine: + nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0) + nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0) + + self.reference_points = nn.Embedding(num_queries, 4) + if instance_query: + self.instance_reference_points = nn.Embedding(num_instances, 4) + + assert boxRPB in ["none", "log", "linear", "both"] + self.boxRPB = boxRPB + if boxRPB != "none": + try: + nheads = self.layers[0].cross_attn_image.num_heads + except AttributeError: + nheads = self.layers[0].cross_attn.num_heads + + n_input = 4 if boxRPB == "both" else 2 + self.boxRPB_embed_x = MLP(n_input, d_model, nheads, 2) + self.boxRPB_embed_y = MLP(n_input, d_model, nheads, 2) + self.compilable_cord_cache = None + self.compilable_stored_size = None + self.coord_cache = {} + + if resolution is not None and stride is not None: + feat_size = resolution // stride + coords_h, coords_w = self._get_coords( + feat_size, feat_size, device="cuda" + ) + self.compilable_cord_cache = (coords_h, coords_w) + self.compilable_stored_size = (feat_size, feat_size) + + self.roi_pooler = ( + RoIAlign(output_size=7, spatial_scale=1, sampling_ratio=-1, aligned=True) + if interaction_layer is not None + else None + ) + if frozen: + for p in self.parameters(): + p.requires_grad_(False) + + self.presence_token = None + self.clamp_presence_logits = clamp_presence_logits + self.clamp_presence_logit_max_val = clamp_presence_logit_max_val + if presence_token: + self.presence_token = nn.Embedding(1, d_model) + self.presence_token_head = MLP(d_model, d_model, 1, 3) + self.presence_token_out_norm = nn.LayerNorm(d_model) + + self.ref_point_head = MLP(2 * self.d_model, self.d_model, self.d_model, 2) + self.dac_use_selfatt_ln = dac_use_selfatt_ln + self.use_act_checkpoint = use_act_checkpoint + + nn.init.normal_(self.query_embed.weight.data) + if 
self.instance_query_embed is not None: + nn.init.normal_(self.instance_query_embed.weight.data) + + assert self.roi_pooler is None + assert self.return_intermediate, "support return_intermediate only" + assert self.box_refine, "support box refine only" + + self.compile_mode = compile_mode + self.compiled = False + # We defer compilation till after the first forward, to first warm-up the boxRPB cache + + # assign layer index to each layer so that some layers can decide what to do + # based on which layer index they are (e.g. cross attention to memory bank only + # in selected layers) + for layer_idx, layer in enumerate(self.layers): + layer.layer_idx = layer_idx + + @staticmethod + def _get_coords(H, W, device): + coords_h = torch.arange(0, H, device=device, dtype=torch.float32) / H + coords_w = torch.arange(0, W, device=device, dtype=torch.float32) / W + return coords_h, coords_w + + def _get_rpb_matrix(self, reference_boxes, feat_size): + H, W = feat_size + boxes_xyxy = box_cxcywh_to_xyxy(reference_boxes).transpose(0, 1) + bs, num_queries, _ = boxes_xyxy.shape + if self.compilable_cord_cache is None: + self.compilable_cord_cache = self._get_coords(H, W, reference_boxes.device) + self.compilable_stored_size = (H, W) + + if torch.compiler.is_dynamo_compiling() or self.compilable_stored_size == ( + H, + W, + ): + # good, hitting the cache, will be compilable + coords_h, coords_w = self.compilable_cord_cache + else: + # cache miss, will create compilation issue + # In case we're not compiling, we'll still rely on the dict-based cache + if feat_size not in self.coord_cache: + self.coord_cache[feat_size] = self._get_coords( + H, W, reference_boxes.device + ) + coords_h, coords_w = self.coord_cache[feat_size] + + assert coords_h.shape == (H,) + assert coords_w.shape == (W,) + + deltas_y = coords_h.view(1, -1, 1) - boxes_xyxy.reshape(-1, 1, 4)[:, :, 1:4:2] + deltas_y = deltas_y.view(bs, num_queries, -1, 2) + deltas_x = coords_w.view(1, -1, 1) - boxes_xyxy.reshape(-1, 1, 4)[:, :, 0:3:2] + deltas_x = deltas_x.view(bs, num_queries, -1, 2) + + if self.boxRPB in ["log", "both"]: + deltas_x_log = deltas_x * 8 # normalize to -8, 8 + deltas_x_log = ( + torch.sign(deltas_x_log) + * torch.log2(torch.abs(deltas_x_log) + 1.0) + / np.log2(8) + ) + + deltas_y_log = deltas_y * 8 # normalize to -8, 8 + deltas_y_log = ( + torch.sign(deltas_y_log) + * torch.log2(torch.abs(deltas_y_log) + 1.0) + / np.log2(8) + ) + if self.boxRPB == "log": + deltas_x = deltas_x_log + deltas_y = deltas_y_log + else: + deltas_x = torch.cat([deltas_x, deltas_x_log], dim=-1) + deltas_y = torch.cat([deltas_y, deltas_y_log], dim=-1) + + if self.training: + assert self.use_act_checkpoint, "activation ckpt not enabled in decoder" + deltas_x = activation_ckpt_wrapper(self.boxRPB_embed_x)( + x=deltas_x, + act_ckpt_enable=self.training and self.use_act_checkpoint, + ) # bs, num_queries, W, n_heads + deltas_y = activation_ckpt_wrapper(self.boxRPB_embed_y)( + x=deltas_y, + act_ckpt_enable=self.training and self.use_act_checkpoint, + ) # bs, num_queries, H, n_heads + + if not torch.compiler.is_dynamo_compiling(): + assert deltas_x.shape[:3] == (bs, num_queries, W) + assert deltas_y.shape[:3] == (bs, num_queries, H) + + B = deltas_y.unsqueeze(3) + deltas_x.unsqueeze( + 2 + ) # bs, num_queries, H, W, n_heads + if not torch.compiler.is_dynamo_compiling(): + assert B.shape[:4] == (bs, num_queries, H, W) + B = B.flatten(2, 3) # bs, num_queries, H*W, n_heads + B = B.permute(0, 3, 1, 2) # bs, n_heads, num_queries, H*W + B = B.contiguous() # memeff attn 
likes ordered strides + if not torch.compiler.is_dynamo_compiling(): + assert B.shape[2:] == (num_queries, H * W) + return B + + def forward( + self, + tgt, + memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + reference_boxes: Optional[Tensor] = None, # num_queries, bs, 4 + # for memory + level_start_index: Optional[Tensor] = None, # num_levels + spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2 + valid_ratios: Optional[Tensor] = None, + # for text + memory_text: Optional[Tensor] = None, + text_attention_mask: Optional[Tensor] = None, + # if `apply_dac` is None, it will default to `self.dac` + apply_dac: Optional[bool] = None, + is_instance_prompt=False, + decoder_extra_kwargs: Optional[Dict] = None, + # ROI memory bank + obj_roi_memory_feat=None, + obj_roi_memory_mask=None, + box_head_trk=None, + ): + """ + Input: + - tgt: nq, bs, d_model + - memory: \\sum{hw}, bs, d_model + - pos: \\sum{hw}, bs, d_model + - reference_boxes: nq, bs, 4 (after sigmoid) + - valid_ratios/spatial_shapes: bs, nlevel, 2 + """ + if memory_mask is not None: + assert ( + self.boxRPB == "none" + ), "inputting a memory_mask in the presence of boxRPB is unexpected/not implemented" + + apply_dac = apply_dac if apply_dac is not None else self.dac + if apply_dac: + assert (tgt.shape[0] == self.num_queries) or ( + self.use_instance_query + and (tgt.shape[0] == self.instance_query_embed.num_embeddings) + ) + + tgt = tgt.repeat(2, 1, 1) + # note that we don't tile tgt_mask, since DAC doesn't + # use self-attention in o2m queries + if reference_boxes is not None: + assert (reference_boxes.shape[0] == self.num_queries) or ( + self.use_instance_query + and ( + reference_boxes.shape[0] + == self.instance_query_embed.num_embeddings + ) + ) + reference_boxes = reference_boxes.repeat(2, 1, 1) + + bs = tgt.shape[1] + intermediate = [] + intermediate_presence_logits = [] + presence_feats = None + + if self.box_refine: + if reference_boxes is None: + # In this case, we're in a one-stage model, so we generate the reference boxes + reference_boxes = self.reference_points.weight.unsqueeze(1) + reference_boxes = ( + reference_boxes.repeat(2, bs, 1) + if apply_dac + else reference_boxes.repeat(1, bs, 1) + ) + reference_boxes = reference_boxes.sigmoid() + intermediate_ref_boxes = [reference_boxes] + else: + reference_boxes = None + intermediate_ref_boxes = None + + output = tgt + presence_out = None + if self.presence_token is not None and is_instance_prompt is False: + # expand to batch dim + presence_out = self.presence_token.weight[None].expand(1, bs, -1) + + box_head = self.bbox_embed + if is_instance_prompt and self.instance_bbox_embed is not None: + box_head = self.instance_bbox_embed + + out_norm = self.norm + if is_instance_prompt and self.instance_norm is not None: + out_norm = self.instance_norm + + for layer_idx, layer in enumerate(self.layers): + reference_points_input = ( + reference_boxes[:, :, None] + * torch.cat([valid_ratios, valid_ratios], -1)[None, :] + ) # nq, bs, nlevel, 4 + + query_sine_embed = gen_sineembed_for_position( + reference_points_input[:, :, 0, :], self.d_model + ) # nq, bs, d_model*2 + + # conditional query + query_pos = self.ref_point_head(query_sine_embed) # nq, bs, d_model + + if self.boxRPB != "none" and reference_boxes is not None: + assert ( + spatial_shapes.shape[0] == 1 + ), "only single scale support implemented" + 
memory_mask = self._get_rpb_matrix( + reference_boxes, + (spatial_shapes[0, 0], spatial_shapes[0, 1]), + ) + memory_mask = memory_mask.flatten(0, 1) # (bs*n_heads, nq, H*W) + if self.training: + assert ( + self.use_act_checkpoint + ), "Activation checkpointing not enabled in the decoder" + output, presence_out = activation_ckpt_wrapper(layer)( + tgt=output, + tgt_query_pos=query_pos, + tgt_query_sine_embed=query_sine_embed, + tgt_key_padding_mask=tgt_key_padding_mask, + tgt_reference_points=reference_points_input, + memory_text=memory_text, + text_attention_mask=text_attention_mask, + memory=memory, + memory_key_padding_mask=memory_key_padding_mask, + memory_level_start_index=level_start_index, + memory_spatial_shapes=spatial_shapes, + memory_pos=pos, + self_attn_mask=tgt_mask, + cross_attn_mask=memory_mask, + dac=apply_dac, + dac_use_selfatt_ln=self.dac_use_selfatt_ln, + presence_token=presence_out, + **(decoder_extra_kwargs or {}), + act_ckpt_enable=self.training and self.use_act_checkpoint, + # ROI memory bank + obj_roi_memory_feat=obj_roi_memory_feat, + obj_roi_memory_mask=obj_roi_memory_mask, + ) + + # iter update + if self.box_refine: + reference_before_sigmoid = inverse_sigmoid(reference_boxes) + if box_head_trk is None: + # delta_unsig = self.bbox_embed(output) + if not self.use_normed_output_consistently: + delta_unsig = box_head(output) + else: + delta_unsig = box_head(out_norm(output)) + else: + # box_head_trk use a separate box head for tracking queries + Q_det = decoder_extra_kwargs["Q_det"] + assert output.size(0) >= Q_det + delta_unsig_det = self.bbox_embed(output[:Q_det]) + delta_unsig_trk = box_head_trk(output[Q_det:]) + delta_unsig = torch.cat([delta_unsig_det, delta_unsig_trk], dim=0) + outputs_unsig = delta_unsig + reference_before_sigmoid + new_reference_points = outputs_unsig.sigmoid() + + reference_boxes = new_reference_points.detach() + if layer_idx != self.num_layers - 1: + intermediate_ref_boxes.append(new_reference_points) + else: + raise NotImplementedError("not implemented yet") + + intermediate.append(out_norm(output)) + if self.presence_token is not None and is_instance_prompt is False: + # norm, mlp head + intermediate_layer_presence_logits = self.presence_token_head( + self.presence_token_out_norm(presence_out) + ).squeeze(-1) + + # clamp to mitigate numerical issues + if self.clamp_presence_logits: + intermediate_layer_presence_logits.clamp( + min=-self.clamp_presence_logit_max_val, + max=self.clamp_presence_logit_max_val, + ) + + intermediate_presence_logits.append(intermediate_layer_presence_logits) + presence_feats = presence_out.clone() + + if not self.compiled and self.compile_mode is not None: + self.forward = torch.compile( + self.forward, mode=self.compile_mode, fullgraph=True + ) + self.compiled = True + + return ( + torch.stack(intermediate), + torch.stack(intermediate_ref_boxes), + ( + torch.stack(intermediate_presence_logits) + if self.presence_token is not None and is_instance_prompt is False + else None + ), + presence_feats, + ) + + +class TransformerEncoderCrossAttention(nn.Module): + def __init__( + self, + d_model: int, + frozen: bool, + pos_enc_at_input: bool, + layer, + num_layers: int, + use_act_checkpoint: bool = False, + batch_first: bool = False, # Do layers expect batch first input? + # which layers to exclude cross attention? 
default: None, means all + # layers use cross attention + remove_cross_attention_layers: Optional[list] = None, + ): + super().__init__() + self.d_model = d_model + self.layers = get_clones(layer, num_layers) + self.num_layers = num_layers + self.norm = nn.LayerNorm(d_model) + self.pos_enc_at_input = pos_enc_at_input + self.use_act_checkpoint = use_act_checkpoint + + if frozen: + for p in self.parameters(): + p.requires_grad_(False) + + self.batch_first = batch_first + + # remove cross attention layers if specified + self.remove_cross_attention_layers = [False] * self.num_layers + if remove_cross_attention_layers is not None: + for i in remove_cross_attention_layers: + self.remove_cross_attention_layers[i] = True + assert len(self.remove_cross_attention_layers) == len(self.layers) + + for i, remove_cross_attention in enumerate(self.remove_cross_attention_layers): + if remove_cross_attention: + self.layers[i].cross_attn_image = None + self.layers[i].norm2 = None + self.layers[i].dropout2 = None + + def forward( + self, + src, # self-attention inputs + prompt, # cross-attention inputs + src_mask: Optional[Tensor] = None, # att.mask for self-attention inputs + prompt_mask: Optional[Tensor] = None, # att.mask for cross-attention inputs + src_key_padding_mask: Optional[Tensor] = None, + prompt_key_padding_mask: Optional[Tensor] = None, + src_pos: Optional[Tensor] = None, # pos_enc for self-attention inputs + prompt_pos: Optional[Tensor] = None, # pos_enc for cross-attention inputs + feat_sizes: Optional[list] = None, + num_obj_ptr_tokens: int = 0, # number of object pointer *tokens* + ): + if isinstance(src, list): + assert isinstance(src_key_padding_mask, list) and isinstance(src_pos, list) + assert len(src) == len(src_key_padding_mask) == len(src_pos) == 1 + src, src_key_padding_mask, src_pos = ( + src[0], + src_key_padding_mask[0], + src_pos[0], + ) + + assert ( + src.shape[1] == prompt.shape[1] + ), "Batch size must be the same for src and prompt" + + output = src + + if self.pos_enc_at_input and src_pos is not None: + output = output + 0.1 * src_pos + + if self.batch_first: + # Convert to batch first + output = output.transpose(0, 1) + src_pos = src_pos.transpose(0, 1) + prompt = prompt.transpose(0, 1) + prompt_pos = prompt_pos.transpose(0, 1) + + for layer in self.layers: + kwds = {} + if isinstance(layer.cross_attn_image, RoPEAttention): + kwds = {"num_k_exclude_rope": num_obj_ptr_tokens} + + output = activation_ckpt_wrapper(layer)( + tgt=output, + memory=prompt, + tgt_mask=src_mask, + memory_mask=prompt_mask, + tgt_key_padding_mask=src_key_padding_mask, + memory_key_padding_mask=prompt_key_padding_mask, + pos=prompt_pos, + query_pos=src_pos, + dac=False, + attn_bias=None, + act_ckpt_enable=self.training and self.use_act_checkpoint, + **kwds, + ) + normed_output = self.norm(output) + + if self.batch_first: + # Convert back to seq first + normed_output = normed_output.transpose(0, 1) + src_pos = src_pos.transpose(0, 1) + + return { + "memory": normed_output, + "pos_embed": src_pos, + "padding_mask": src_key_padding_mask, + } + + +class TransformerDecoderLayerv1(nn.Module): + def __init__( + self, + activation: str, + cross_attention: nn.Module, + d_model: int, + dim_feedforward: int, + dropout: float, + pos_enc_at_attn: bool, + pos_enc_at_cross_attn_keys: bool, + pos_enc_at_cross_attn_queries: bool, + pre_norm: bool, + self_attention: nn.Module, + ): + super().__init__() + self.d_model = d_model + self.dim_feedforward = dim_feedforward + self.dropout_value = dropout + self.self_attn = 
self_attention + self.cross_attn_image = cross_attention + + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.norm3 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + self.activation_str = activation + self.activation = get_activation_fn(activation) + self.pre_norm = pre_norm + + self.pos_enc_at_attn = pos_enc_at_attn + self.pos_enc_at_cross_attn_queries = pos_enc_at_cross_attn_queries + self.pos_enc_at_cross_attn_keys = pos_enc_at_cross_attn_keys + + def forward_post( + self, + tgt, + memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + **kwargs, + ): + q = k = tgt + query_pos if self.pos_enc_at_attn else tgt + + # Self attention + tgt2 = self.self_attn( + q, + k, + value=tgt, + attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask, + )[0] + tgt = tgt + self.dropout1(tgt2) + tgt = self.norm1(tgt) + + # Cross attention to image + tgt2 = self.cross_attn_image( + query=tgt + query_pos if self.pos_enc_at_cross_attn_queries else tgt, + key=memory + pos if self.pos_enc_at_cross_attn_keys else memory, + value=memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask, + )[0] + tgt = tgt + self.dropout2(tgt2) + tgt = self.norm2(tgt) + + # FFN + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout3(tgt2) + tgt = self.norm3(tgt) + return tgt + + def forward_pre( + self, + tgt, + memory, + dac: bool = False, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + attn_bias: Optional[Tensor] = None, + **kwargs, + ): + if dac: + # we only apply self attention to the first half of the queries + assert tgt.shape[0] % 2 == 0 + other_tgt = tgt[tgt.shape[0] // 2 :] + tgt = tgt[: tgt.shape[0] // 2] + tgt2 = self.norm1(tgt) + q = k = tgt2 + query_pos if self.pos_enc_at_attn else tgt2 + tgt2 = self.self_attn( + q, + k, + value=tgt2, + attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask, + )[0] + tgt = tgt + self.dropout1(tgt2) + if dac: + # Recombine + tgt = torch.cat((tgt, other_tgt), dim=0) + tgt2 = self.norm2(tgt) + tgt2 = self.cross_attn_image( + query=tgt2 + query_pos if self.pos_enc_at_cross_attn_queries else tgt2, + key=memory + pos if self.pos_enc_at_cross_attn_keys else memory, + value=memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask, + attn_bias=attn_bias, + )[0] + tgt = tgt + self.dropout2(tgt2) + tgt2 = self.norm3(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout3(tgt2) + return tgt + + def forward( + self, + tgt, + memory, + dac: bool = False, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + attn_bias: Optional[Tensor] = None, + **kwds: Any, + ) -> 
torch.Tensor: + fwd_fn = self.forward_pre if self.pre_norm else self.forward_post + return fwd_fn( + tgt, + memory, + dac=dac, + tgt_mask=tgt_mask, + memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, + memory_key_padding_mask=memory_key_padding_mask, + pos=pos, + query_pos=query_pos, + attn_bias=attn_bias, + **kwds, + ) + + +class TransformerDecoderLayerv2(TransformerDecoderLayerv1): + def __init__(self, cross_attention_first=False, *args: Any, **kwds: Any): + super().__init__(*args, **kwds) + self.cross_attention_first = cross_attention_first + + def _forward_sa(self, tgt, query_pos): + # Self-Attention + tgt2 = self.norm1(tgt) + q = k = tgt2 + query_pos if self.pos_enc_at_attn else tgt2 + tgt2 = self.self_attn(q, k, v=tgt2) + tgt = tgt + self.dropout1(tgt2) + return tgt + + def _forward_ca(self, tgt, memory, query_pos, pos, num_k_exclude_rope=0): + if self.cross_attn_image is None: + return tgt + + kwds = {} + if num_k_exclude_rope > 0: + assert isinstance(self.cross_attn_image, RoPEAttention) + kwds = {"num_k_exclude_rope": num_k_exclude_rope} + + # Cross-Attention + tgt2 = self.norm2(tgt) + tgt2 = self.cross_attn_image( + q=tgt2 + query_pos if self.pos_enc_at_cross_attn_queries else tgt2, + k=memory + pos if self.pos_enc_at_cross_attn_keys else memory, + v=memory, + **kwds, + ) + tgt = tgt + self.dropout2(tgt2) + return tgt + + def forward_pre( + self, + tgt, + memory, + dac: bool, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + attn_bias: Optional[Tensor] = None, + num_k_exclude_rope: int = 0, + ): + assert dac is False + assert tgt_mask is None + assert memory_mask is None + assert tgt_key_padding_mask is None + assert memory_key_padding_mask is None + assert attn_bias is None + + if self.cross_attention_first: + tgt = self._forward_ca(tgt, memory, query_pos, pos, num_k_exclude_rope) + tgt = self._forward_sa(tgt, query_pos) + else: + tgt = self._forward_sa(tgt, query_pos) + tgt = self._forward_ca(tgt, memory, query_pos, pos, num_k_exclude_rope) + + # MLP + tgt2 = self.norm3(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout3(tgt2) + return tgt + + def forward(self, *args: Any, **kwds: Any) -> torch.Tensor: + if self.pre_norm: + return self.forward_pre(*args, **kwds) + raise NotImplementedError diff --git a/detect_tools/sam3/sam3/model/edt.py b/detect_tools/sam3/sam3/model/edt.py new file mode 100644 index 0000000000000000000000000000000000000000..9448c1d3b0ee26d05f203dd6050abfa62e9a0846 --- /dev/null +++ b/detect_tools/sam3/sam3/model/edt.py @@ -0,0 +1,173 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +"""Triton kernel for euclidean distance transform (EDT)""" + +import torch +import triton +import triton.language as tl + +""" +Disclaimer: This implementation is not meant to be extremely efficient. A CUDA kernel would likely be more efficient. +Even in Triton, there may be more suitable algorithms. + +The goal of this kernel is to mimic cv2.distanceTransform(input, cv2.DIST_L2, 0). +Recall that the euclidean distance transform (EDT) calculates the L2 distance to the closest zero pixel for each pixel of the source image. 
+ +For images of size NxN, the naive algorithm would be to compute pairwise distances between every pair of points, leading to an O(N^4) algorithm, which is obviously impractical. +One can do better using the following approach: +- First, compute the distance to the closest point in the same row. We can write it as Row_EDT[i,j] = min_k (sqrt((k-j)^2) if input[i,k]==0 else +infinity). With a naive implementation, this step has O(N^3) complexity +- Then, because of the triangle inequality, we notice that the EDT for a given location [i,j] is the min of the row EDTs in the same column. EDT[i,j] = min_k Row_EDT[k, j]. This is also O(N^3) + +Overall, this algorithm is quite amenable to parallelization, and has O(N^3) complexity. Can we do better? + +It turns out that we can leverage the structure of the L2 distance (nice and convex) to find the minimum in a more efficient way. +We follow the algorithm from "Distance Transforms of Sampled Functions" (https://cs.brown.edu/people/pfelzens/papers/dt-final.pdf), which is also what is implemented in OpenCV. + +For a one-dimensional EDT, we can compute the EDT of an arbitrary function F that we discretize over the grid. Note that for the binary EDT that we're interested in, we can set F(i,j) = 0 if input[i,j]==0 else +infinity +For now, we'll compute the EDT squared, and will take the sqrt only at the very end. +The basic idea is that each point at location i spawns a parabola around itself, with a bias equal to F(i). So specifically, we're looking at the parabola (x - i)^2 + F(i) +When we're looking for the row EDT at location j, we're effectively looking for min_i (j - i)^2 + F(i). In other words, we want to find the lowest parabola at location j. + +To do this efficiently, we need to maintain the lower envelope of the union of parabolas. This can be constructed on the fly using a stack-like approach: + - every time we want to add a new parabola, we check whether it covers the current right-most parabola. If so, that parabola was useless, so we can pop it from the stack + - repeat until we can't find any more parabolas to pop. Then push the new one. + +This algorithm runs in O(N) for a single row, so overall O(N^2) when applied to all rows. +As before, we can decompose the algorithm over rows and columns, leading to an overall run time of O(N^2). + +This algorithm is less suited to GPUs, since the one-dimensional EDT computation is quite sequential in nature. However, we can parallelize over batch and row dimensions. +In Triton, things are particularly bad at the moment, since there is no support for reading/writing to the local memory at a specific index (a local gather is coming soon, see https://github.com/triton-lang/triton/issues/974, but no mention of writing, i.e. scatter). +One could emulate these operations with masking, but in initial tests, it proved to be worse than naively reading and writing to the global memory. My guess is that the cache is compensating somewhat for the repeated single-point accesses. 
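Note (editor, not part of this diff): to make the intended semantics concrete, here is a hedged sanity-check sketch comparing `edt_triton` against SciPy's CPU EDT, which computes the same distance-to-nearest-zero transform as `cv2.distanceTransform(..., cv2.DIST_L2, 0)`. It assumes a CUDA GPU with Triton and SciPy available; mask sizes and the tolerance are illustrative and may need adjusting.

```python
# Hypothetical sanity check for edt_triton (requires CUDA, triton and scipy).
import torch
from scipy.ndimage import distance_transform_edt
from sam3.model.edt import edt_triton  # module added in this diff

masks = torch.rand(2, 64, 64, device="cuda") > 0.5   # random binary masks, shape (B, H, W)
out = edt_triton(masks)                               # per-pixel distance to the nearest zero pixel

# CPU reference: SciPy computes the same L2 distance transform, mask by mask.
ref = torch.stack([
    torch.from_numpy(distance_transform_edt(m.cpu().numpy())).float()
    for m in masks
])
print(torch.allclose(out.cpu(), ref, atol=1e-3))
```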
+ + +The timing obtained on a H100 for a random batch of masks of dimension 256 x 1024 x 1024 are as follows: +- OpenCV: 1780ms (including round-trip to cpu, but discounting the fact that it introduces a synchronization point) +- triton, O(N^3) algo: 627ms +- triton, O(N^2) algo: 322ms + +Overall, despite being quite naive, this implementation is roughly 5.5x faster than the openCV cpu implem + +""" + + +@triton.jit +def edt_kernel(inputs_ptr, outputs_ptr, v, z, height, width, horizontal: tl.constexpr): + # This is a somewhat verbatim implementation of the efficient 1D EDT algorithm described above + # It can be applied horizontally or vertically depending if we're doing the first or second stage. + # It's parallelized across batch+row (or batch+col if horizontal=False) + # TODO: perhaps the implementation can be revisited if/when local gather/scatter become available in triton + batch_id = tl.program_id(axis=0) + if horizontal: + row_id = tl.program_id(axis=1) + block_start = (batch_id * height * width) + row_id * width + length = width + stride = 1 + else: + col_id = tl.program_id(axis=1) + block_start = (batch_id * height * width) + col_id + length = height + stride = width + + # This will be the index of the right most parabola in the envelope ("the top of the stack") + k = 0 + for q in range(1, length): + # Read the function value at the current location. Note that we're doing a singular read, not very efficient + cur_input = tl.load(inputs_ptr + block_start + (q * stride)) + # location of the parabola on top of the stack + r = tl.load(v + block_start + (k * stride)) + # associated boundary + z_k = tl.load(z + block_start + (k * stride)) + # value of the function at the parabola location + previous_input = tl.load(inputs_ptr + block_start + (r * stride)) + # intersection between the two parabolas + s = (cur_input - previous_input + q * q - r * r) / (q - r) / 2 + + # we'll pop as many parabolas as required + while s <= z_k and k - 1 >= 0: + k = k - 1 + r = tl.load(v + block_start + (k * stride)) + z_k = tl.load(z + block_start + (k * stride)) + previous_input = tl.load(inputs_ptr + block_start + (r * stride)) + s = (cur_input - previous_input + q * q - r * r) / (q - r) / 2 + + # Store the new one + k = k + 1 + tl.store(v + block_start + (k * stride), q) + tl.store(z + block_start + (k * stride), s) + if k + 1 < length: + tl.store(z + block_start + ((k + 1) * stride), 1e9) + + # Last step, we read the envelope to find the min in every location + k = 0 + for q in range(length): + while ( + k + 1 < length + and tl.load( + z + block_start + ((k + 1) * stride), mask=(k + 1) < length, other=q + ) + < q + ): + k += 1 + r = tl.load(v + block_start + (k * stride)) + d = q - r + old_value = tl.load(inputs_ptr + block_start + (r * stride)) + tl.store(outputs_ptr + block_start + (q * stride), old_value + d * d) + + +def edt_triton(data: torch.Tensor): + """ + Computes the Euclidean Distance Transform (EDT) of a batch of binary images. + + Args: + data: A tensor of shape (B, H, W) representing a batch of binary images. + + Returns: + A tensor of the same shape as data containing the EDT. + It should be equivalent to a batched version of cv2.distanceTransform(input, cv2.DIST_L2, 0) + """ + assert data.dim() == 3 + assert data.is_cuda + B, H, W = data.shape + data = data.contiguous() + + # Allocate the "function" tensor. 
Implicitly the function is 0 if data[i,j]==0 else +infinity + output = torch.where(data, 1e18, 0.0) + assert output.is_contiguous() + + # Scratch tensors for the parabola stacks + parabola_loc = torch.zeros(B, H, W, dtype=torch.uint32, device=data.device) + parabola_inter = torch.empty(B, H, W, dtype=torch.float, device=data.device) + parabola_inter[:, :, 0] = -1e18 + parabola_inter[:, :, 1] = 1e18 + + # Grid size (number of blocks) + grid = (B, H) + + # Launch initialization kernel + edt_kernel[grid]( + output.clone(), + output, + parabola_loc, + parabola_inter, + H, + W, + horizontal=True, + ) + + # reset the parabola stacks + parabola_loc.zero_() + parabola_inter[:, :, 0] = -1e18 + parabola_inter[:, :, 1] = 1e18 + + grid = (B, W) + edt_kernel[grid]( + output.clone(), + output, + parabola_loc, + parabola_inter, + H, + W, + horizontal=False, + ) + # don't forget to take sqrt at the end + return output.sqrt() diff --git a/detect_tools/sam3/sam3/model/encoder.py b/detect_tools/sam3/sam3/model/encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..842bc56c59e59406a3c6f50096d542ccc2db033d --- /dev/null +++ b/detect_tools/sam3/sam3/model/encoder.py @@ -0,0 +1,594 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved +# Based on https://github.com/IDEA-Research/GroundingDINO + +from typing import Any, Dict, List, Optional, Tuple + +import torch +from torch import nn, Tensor + +from .act_ckpt_utils import activation_ckpt_wrapper +from .model_misc import get_activation_fn, get_clones, get_valid_ratio + + +class TransformerEncoderLayer(nn.Module): + """ + Transformer encoder layer that performs self-attention followed by cross-attention. + + This layer was previously called TransformerDecoderLayer but was renamed to better + reflect its role in the architecture. It processes input sequences through self-attention + and then cross-attention with another input (typically image features). + + The layer supports both pre-norm and post-norm configurations, as well as + positional encoding at different stages of the attention mechanism. + """ + + def __init__( + self, + activation: str, + cross_attention: nn.Module, + d_model: int, + dim_feedforward: int, + dropout: float, + pos_enc_at_attn: bool, + pos_enc_at_cross_attn_keys: bool, + pos_enc_at_cross_attn_queries: bool, + pre_norm: bool, + self_attention: nn.Module, + ): + """ + Initialize a transformer encoder layer. 
+ + Args: + activation: Activation function to use in the feedforward network + cross_attention: Cross-attention module for attending to image features + d_model: Model dimension/hidden size + dim_feedforward: Dimension of the feedforward network + dropout: Dropout probability + pos_enc_at_attn: Whether to add positional encodings at self-attention + pos_enc_at_cross_attn_keys: Whether to add positional encodings to keys in cross-attention + pos_enc_at_cross_attn_queries: Whether to add positional encodings to queries in cross-attention + pre_norm: Whether to use pre-norm (True) or post-norm (False) architecture + self_attention: Self-attention module + """ + super().__init__() + self.d_model = d_model + self.dim_feedforward = dim_feedforward + self.dropout_value = dropout + self.self_attn = self_attention + self.cross_attn_image = cross_attention + + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.norm3 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + self.activation_str = activation + self.activation = get_activation_fn(activation) + self.pre_norm = pre_norm + + self.pos_enc_at_attn = pos_enc_at_attn + self.pos_enc_at_cross_attn_queries = pos_enc_at_cross_attn_queries + self.pos_enc_at_cross_attn_keys = pos_enc_at_cross_attn_keys + + self.layer_idx = None + + def forward_post( + self, + tgt: Tensor, + memory: Tensor, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + **kwargs, + ) -> Tensor: + """ + Forward pass for post-norm architecture. + + In post-norm architecture, normalization is applied after attention and feedforward operations. 
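For orientation, here is a hedged construction of the layer above using plain `nn.MultiheadAttention` as stand-ins for the injected attention modules; the real SAM3 configuration presumably builds its own attention implementations, and the hyperparameters below (as well as `"relu"` being accepted by `get_activation_fn`) are assumptions for illustration only.

```python
import torch
from torch import nn

d_model, n_heads = 256, 8
layer = TransformerEncoderLayer(
    activation="relu",
    cross_attention=nn.MultiheadAttention(d_model, n_heads, dropout=0.1),
    d_model=d_model,
    dim_feedforward=2048,
    dropout=0.1,
    pos_enc_at_attn=False,
    pos_enc_at_cross_attn_keys=True,
    pos_enc_at_cross_attn_queries=False,
    pre_norm=True,
    self_attention=nn.MultiheadAttention(d_model, n_heads, dropout=0.1),
)

tgt = torch.randn(1024, 2, d_model)    # flattened image tokens, (seq, batch, dim)
memory = torch.randn(20, 2, d_model)   # text/prompt tokens to cross-attend to
pos = torch.randn_like(memory)         # positional encoding added to the keys
out = layer(tgt, memory, pos=pos)      # -> (1024, 2, 256), same shape as tgt
```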
+ + Args: + tgt: Input tensor to be processed + memory: Memory tensor for cross-attention + tgt_mask: Mask for self-attention + memory_mask: Mask for cross-attention + tgt_key_padding_mask: Key padding mask for self-attention + memory_key_padding_mask: Key padding mask for cross-attention + pos: Positional encoding for memory + query_pos: Positional encoding for query + **kwargs: Additional keyword arguments + + Returns: + Processed tensor + """ + q = k = tgt + query_pos if self.pos_enc_at_attn else tgt + + # Self attention + tgt2 = self.self_attn( + q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask + )[0] + tgt = tgt + self.dropout1(tgt2) + tgt = self.norm1(tgt) + + # Cross attention to image + tgt2 = self.cross_attn_image( + query=tgt + query_pos if self.pos_enc_at_cross_attn_queries else tgt, + key=memory + pos if self.pos_enc_at_cross_attn_keys else memory, + value=memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask, + )[0] + tgt = tgt + self.dropout2(tgt2) + tgt = self.norm2(tgt) + + # FFN + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout3(tgt2) + tgt = self.norm3(tgt) + return tgt + + def forward_pre( + self, + tgt: Tensor, + memory: Tensor, + dac: bool = False, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + # attn_bias: Optional[Tensor] = None, + # **kwargs, + ) -> Tensor: + """ + Forward pass for pre-norm architecture. + + In pre-norm architecture, normalization is applied before attention and feedforward operations. + + Args: + tgt: Input tensor to be processed + memory: Memory tensor for cross-attention + dac: Whether to use Divide-and-Conquer attention + tgt_mask: Mask for self-attention + memory_mask: Mask for cross-attention + tgt_key_padding_mask: Key padding mask for self-attention + memory_key_padding_mask: Key padding mask for cross-attention + pos: Positional encoding for memory + query_pos: Positional encoding for query + attn_bias: Optional attention bias tensor + **kwargs: Additional keyword arguments + + Returns: + Processed tensor + """ + if dac: + # we only apply self attention to the first half of the queries + assert tgt.shape[0] % 2 == 0 + other_tgt = tgt[tgt.shape[0] // 2 :] + tgt = tgt[: tgt.shape[0] // 2] + tgt2 = self.norm1(tgt) + q = k = tgt2 + query_pos if self.pos_enc_at_attn else tgt2 + tgt2 = self.self_attn( + q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask + )[0] + tgt = tgt + self.dropout1(tgt2) + if dac: + # Recombine + tgt = torch.cat((tgt, other_tgt), dim=0) + tgt2 = self.norm2(tgt) + tgt2 = self.cross_attn_image( + query=tgt2 + query_pos if self.pos_enc_at_cross_attn_queries else tgt2, + key=memory + pos if self.pos_enc_at_cross_attn_keys else memory, + value=memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask, + # attn_bias=attn_bias, + )[0] + tgt = tgt + self.dropout2(tgt2) + tgt2 = self.norm3(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout3(tgt2) + return tgt + + def forward( + self, + tgt: Tensor, + memory: Tensor, + dac: bool = False, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, 
+ query_pos: Optional[Tensor] = None, + # attn_bias: Optional[Tensor] = None, + # **kwds: Any, + ) -> torch.Tensor: + """ + Forward pass for the transformer encoder layer. + + Args: + tgt: Input tensor to be processed + memory: Memory tensor (e.g., image features) for cross-attention + dac: Whether to use Divide-and-Conquer attention (only apply self-attention to first half) + tgt_mask: Mask for self-attention + memory_mask: Mask for cross-attention + tgt_key_padding_mask: Key padding mask for self-attention + memory_key_padding_mask: Key padding mask for cross-attention + pos: Positional encoding for memory + query_pos: Positional encoding for query + attn_bias: Optional attention bias tensor + **kwds: Additional keyword arguments + + Returns: + Processed tensor after self-attention, cross-attention, and feedforward network + """ + fwd_fn = self.forward_pre if self.pre_norm else self.forward_post + return fwd_fn( + tgt, + memory, + dac=dac, + tgt_mask=tgt_mask, + memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, + memory_key_padding_mask=memory_key_padding_mask, + pos=pos, + query_pos=query_pos, + # attn_bias=attn_bias, + # **kwds, + ) + + +class TransformerEncoder(nn.Module): + """ + Transformer encoder that processes multi-level features. + + This encoder takes multi-level features (e.g., from a backbone network) and processes + them through a stack of transformer encoder layers. It supports features from multiple + levels (e.g., different resolutions) and can apply activation checkpointing for memory + efficiency during training. + + Args: + layer: The encoder layer to be stacked multiple times + num_layers: Number of encoder layers to stack + d_model: Model dimension/hidden size + num_feature_levels: Number of feature levels to process + frozen: Whether to freeze the parameters of this module + use_act_checkpoint: Whether to use activation checkpointing during training + """ + + def __init__( + self, + layer: nn.Module, + num_layers: int, + d_model: int, + num_feature_levels: int, + frozen: bool = False, + use_act_checkpoint: bool = False, + ): + super().__init__() + self.layers = get_clones(layer, num_layers) + self.num_layers = num_layers + + self.num_feature_levels = num_feature_levels + self.level_embed = None + if num_feature_levels > 1: + self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model)) + + if frozen: + for p in self.parameters(): + p.requires_grad_(False) + + self.use_act_checkpoint = use_act_checkpoint + + # assign layer index to each layer so that some layers can decide what to do + # based on which layer index they are (e.g. 
cross attention to memory bank only + # in selected layers) + for layer_idx, layer in enumerate(self.layers): + layer.layer_idx = layer_idx + + @staticmethod + def get_reference_points(spatial_shapes, valid_ratios, device): + with torch.no_grad(): + reference_points_list = [] + for lvl, (H_, W_) in enumerate(spatial_shapes): + ref_y, ref_x = torch.meshgrid( + torch.linspace( + 0.5, H_ - 0.5, H_, dtype=torch.float32, device=device + ), + torch.linspace( + 0.5, W_ - 0.5, W_, dtype=torch.float32, device=device + ), + ) + ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_) + ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_) + ref = torch.stack((ref_x, ref_y), -1) + reference_points_list.append(ref) + reference_points = torch.cat(reference_points_list, 1) + reference_points = reference_points[:, :, None] * valid_ratios[:, None] + + return reference_points + + def _prepare_multilevel_features(self, srcs, masks, pos_embeds): + assert ( + len(srcs) == self.num_feature_levels + ), "mismatch between expected and received # of feature levels" + + src_flatten = [] + mask_flatten = [] + lvl_pos_embed_flatten = [] + spatial_shapes = [] + has_mask = masks is not None and masks[0] is not None + for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)): + bs, c, h, w = src.shape + spatial_shape = (h, w) + spatial_shapes.append(spatial_shape) + + src = src.flatten(2).transpose(1, 2) # bs, hw, c + if has_mask: + mask = mask.flatten(1) + pos_embed = pos_embed.flatten(2).transpose(1, 2) # bs, hw, c + if self.level_embed is not None: + lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1) + else: + lvl_pos_embed = pos_embed + lvl_pos_embed_flatten.append(lvl_pos_embed) + src_flatten.append(src) + if has_mask: + mask_flatten.append(mask) + src_flatten = torch.cat(src_flatten, 1) # bs, \sum{hxw}, c + mask_flatten = torch.cat(mask_flatten, 1) if has_mask else None # bs, \sum{hxw} + lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) # bs, \sum{hxw}, c + spatial_shapes = torch.tensor( + spatial_shapes, dtype=torch.long, device=src_flatten.device + ) + level_start_index = torch.cat( + ( + spatial_shapes.new_zeros((1,)), + spatial_shapes.prod(1).cumsum(0)[:-1], + ) + ) + if has_mask: + valid_ratios = torch.stack([get_valid_ratio(m) for m in masks], 1) + else: + valid_ratios = torch.ones( + (src_flatten.shape[0], self.num_feature_levels, 2), + device=src_flatten.device, + ) + + return ( + src_flatten, + mask_flatten, + lvl_pos_embed_flatten, + level_start_index, + valid_ratios, + spatial_shapes, + ) + + def forward( + self, + src: List[Tensor], + src_key_padding_masks: Optional[List[Tensor]] = None, + pos: Optional[List[Tensor]] = None, + prompt: Optional[Tensor] = None, + prompt_key_padding_mask: Optional[Tensor] = None, + encoder_extra_kwargs: Optional[Dict] = None, + ) -> Tuple[Tensor, Optional[Tensor], Tensor, Tensor, Tensor, Tensor]: + """ + Process multi-level features through the transformer encoder. 
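The bookkeeping in `_prepare_multilevel_features` is easier to follow with concrete shapes; the sketch below reproduces just the flattening and `level_start_index` computation with made-up feature sizes, and is not part of the module.

```python
import torch

bs, c = 2, 256
sizes = [(64, 64), (32, 32), (16, 16)]                    # three feature levels
srcs = [torch.randn(bs, c, h, w) for h, w in sizes]

# (bs, h*w, c) per level, concatenated along the token dimension
src_flatten = torch.cat([s.flatten(2).transpose(1, 2) for s in srcs], dim=1)
print(src_flatten.shape)                                  # torch.Size([2, 5376, 256])

spatial_shapes = torch.tensor(sizes)
level_start_index = torch.cat(
    (spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])
)
print(level_start_index)                                  # tensor([   0, 4096, 5120])
```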
+ + Args: + src: List of multi-level features, each with shape (batch_size, channels, height, width) + src_key_padding_masks: List of padding masks for each feature level, each with shape (batch_size, height, width) + pos: List of positional embeddings for each feature level, each with shape (batch_size, channels, height, width) + prompt: Optional text/prompt features to attend to, with shape (seq_len, batch_size, d_model) + prompt_key_padding_mask: Optional padding mask for prompt, with shape (batch_size, seq_len) + encoder_extra_kwargs: Optional additional arguments to pass to each encoder layer + + Returns: + A tuple containing: + - output: Processed features with shape (seq_len, batch_size, d_model) + - key_padding_masks_flatten: Flattened padding masks + - lvl_pos_embed_flatten: Flattened positional embeddings + - level_start_index: Starting indices for each feature level + - spatial_shapes: Spatial dimensions of each feature level + - valid_ratios: Valid ratios for each feature level + """ + assert ( + len(src) == self.num_feature_levels + ), "must be equal to num_feature_levels" + if src_key_padding_masks is not None: + assert len(src_key_padding_masks) == self.num_feature_levels + if pos is not None: + assert len(pos) == self.num_feature_levels + # Flatten multilevel feats and add level pos embeds + ( + src_flatten, + key_padding_masks_flatten, + lvl_pos_embed_flatten, + level_start_index, + valid_ratios, + spatial_shapes, + ) = self._prepare_multilevel_features(src, src_key_padding_masks, pos) + + reference_points = self.get_reference_points( + spatial_shapes, valid_ratios, device=src_flatten.device + ) + + output = src_flatten + for layer in self.layers: + layer_kwargs = {} + + assert isinstance(layer, TransformerEncoderLayer) + layer_kwargs["memory"] = prompt + layer_kwargs["memory_key_padding_mask"] = prompt_key_padding_mask + layer_kwargs["query_pos"] = lvl_pos_embed_flatten + layer_kwargs["tgt"] = output + layer_kwargs["tgt_key_padding_mask"] = key_padding_masks_flatten + + if self.training: + assert self.use_act_checkpoint, "activation ckpt not enabled in encoder" + if encoder_extra_kwargs is not None: + layer_kwargs.update(encoder_extra_kwargs) + output = activation_ckpt_wrapper(layer)( + **layer_kwargs, + act_ckpt_enable=self.training and self.use_act_checkpoint, + ) + # return as seq first + return ( + output.transpose(0, 1), + ( + key_padding_masks_flatten.transpose(0, 1) + if key_padding_masks_flatten is not None + else None + ), + lvl_pos_embed_flatten.transpose(0, 1), + level_start_index, + spatial_shapes, + valid_ratios, + ) + + +class TransformerEncoderFusion(TransformerEncoder): + """ + Transformer encoder that fuses text and image features. + + This encoder extends TransformerEncoder to handle both text and image features, + with the ability to add pooled text features to image features for better + cross-modal fusion. It supports torch.compile for performance optimization. 
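Conversely, downstream code that receives the sequence-first `output` together with `spatial_shapes` can recover per-level feature maps again; a small sketch with illustrative shapes (not taken from the diff):

```python
import torch

seq_len, bs, c = 5376, 2, 256                      # 64*64 + 32*32 + 16*16 tokens
memory = torch.randn(seq_len, bs, c)               # sequence-first encoder output
spatial_shapes = torch.tensor([[64, 64], [32, 32], [16, 16]])

per_level, offset = [], 0
for h, w in spatial_shapes.tolist():
    chunk = memory[offset:offset + h * w]          # (h*w, bs, c) tokens of one level
    per_level.append(chunk.permute(1, 2, 0).reshape(bs, c, h, w))
    offset += h * w
print([tuple(x.shape) for x in per_level])         # [(2, 256, 64, 64), (2, 256, 32, 32), (2, 256, 16, 16)]
```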
+ + Args: + layer: The encoder layer to be stacked multiple times + num_layers: Number of encoder layers to stack + d_model: Model dimension/hidden size + num_feature_levels: Number of feature levels to process + add_pooled_text_to_img_feat: Whether to add pooled text features to image features + pool_text_with_mask: Whether to use the mask when pooling text features + compile_mode: Mode for torch.compile, or None to disable compilation + **kwargs: Additional arguments to pass to the parent class + """ + + def __init__( + self, + layer: nn.Module, + num_layers: int, + d_model: int, + num_feature_levels: int, + add_pooled_text_to_img_feat: bool = True, + pool_text_with_mask: bool = False, + compile_mode: Optional[str] = None, + **kwargs, + ): + super().__init__( + layer, + num_layers, + d_model, + num_feature_levels, + **kwargs, + ) + self.add_pooled_text_to_img_feat = add_pooled_text_to_img_feat + if self.add_pooled_text_to_img_feat: + self.text_pooling_proj = nn.Linear(d_model, d_model) + self.pool_text_with_mask = pool_text_with_mask + if compile_mode is not None: + self.forward = torch.compile( + self.forward, mode=compile_mode, fullgraph=True + ) + + @staticmethod + def get_reference_points(spatial_shapes, valid_ratios, device): + # Not needed here + return None + + def forward( + self, + src: List[Tensor], + prompt: Tensor, + src_key_padding_mask: Optional[List[Tensor]] = None, + src_pos: Optional[List[Tensor]] = None, + prompt_key_padding_mask: Optional[Tensor] = None, + prompt_pos: Optional[Tensor] = None, + feat_sizes: Optional[List[int]] = None, + encoder_extra_kwargs: Optional[Dict] = None, + ): + # Restore spatial shapes of vision + bs = src[0].shape[1] # seq first + if feat_sizes is not None: + assert len(feat_sizes) == len(src) + if src_key_padding_mask is None: + src_key_padding_mask = [None] * len(src) + for i, (h, w) in enumerate(feat_sizes): + src[i] = src[i].reshape(h, w, bs, -1).permute(2, 3, 0, 1) + src_pos[i] = src_pos[i].reshape(h, w, bs, -1).permute(2, 3, 0, 1) + src_key_padding_mask[i] = ( + src_key_padding_mask[i].reshape(h, w, bs).permute(2, 0, 1) + if src_key_padding_mask[i] is not None + else None + ) + else: + assert all( + x.dim == 4 for x in src + ), "expected list of (bs, c, h, w) tensors" + + if self.add_pooled_text_to_img_feat: + # Fusion: Add mean pooled text to image features + pooled_text = pool_text_feat( + prompt, prompt_key_padding_mask, self.pool_text_with_mask + ) + pooled_text = self.text_pooling_proj(pooled_text)[ + ..., None, None + ] # prompt is seq first + src = [x.add_(pooled_text) for x in src] + + ( + out, + key_padding_masks_flatten, + lvl_pos_embed_flatten, + level_start_index, + spatial_shapes, + valid_ratios, + ) = super().forward( + src, + src_key_padding_masks=src_key_padding_mask, + pos=src_pos, + prompt=prompt.transpose(0, 1), + prompt_key_padding_mask=prompt_key_padding_mask, + encoder_extra_kwargs=encoder_extra_kwargs, + ) + + return { + "memory": out, + "padding_mask": key_padding_masks_flatten, + "pos_embed": lvl_pos_embed_flatten, + "memory_text": prompt, + "level_start_index": level_start_index, + "spatial_shapes": spatial_shapes, + "valid_ratios": valid_ratios, + } + + +def pool_text_feat(prompt, prompt_mask, pool_with_mask): + # prompt has shape (seq, bs, dim) + if not pool_with_mask: + return prompt.mean(dim=0) + + # prompt_mask has shape (bs, seq), where False is valid and True is padding + assert prompt_mask.dim() == 2 + # is_valid has shape (seq, bs, 1), where 1 is valid and 0 is padding + is_valid = 
(~prompt_mask).float().permute(1, 0)[..., None] + # num_valid has shape (bs, 1) + num_valid = torch.clamp(torch.sum(is_valid, dim=0), min=1.0) + + # mean pool over all the valid tokens + pooled_text = (prompt * is_valid).sum(dim=0) / num_valid + return pooled_text diff --git a/detect_tools/sam3/sam3/model/geometry_encoders.py b/detect_tools/sam3/sam3/model/geometry_encoders.py new file mode 100644 index 0000000000000000000000000000000000000000..bff29172b96d0ab5d4b2fc795da1d282d1c5f541 --- /dev/null +++ b/detect_tools/sam3/sam3/model/geometry_encoders.py @@ -0,0 +1,850 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +from typing import Tuple + +import torch +import torch.nn as nn +import torchvision +from typing_extensions import override + +from .act_ckpt_utils import activation_ckpt_wrapper +from .box_ops import box_cxcywh_to_xyxy + +from .model_misc import get_clones + + +def is_right_padded(mask): + """Given a padding mask (following pytorch convention, 1s for padded values), + returns whether the padding is on the right or not.""" + return (mask.long() == torch.sort(mask.long(), dim=-1)[0]).all() + + +def concat_padded_sequences(seq1, mask1, seq2, mask2, return_index: bool = False): + """ + Concatenates two right-padded sequences, such that the resulting sequence + is contiguous and also right-padded. + + Following pytorch's convention, tensors are sequence first, and the mask are + batch first, with 1s for padded values. + + :param seq1: A tensor of shape (seq1_length, batch_size, hidden_size). + :param mask1: A tensor of shape (batch_size, seq1_length). + :param seq2: A tensor of shape (seq2_length, batch_size, hidden_size). + :param mask2: A tensor of shape (batch_size, seq2_length). + :param return_index: If True, also returns the index of the ids of the element of seq2 + in the concatenated sequence. This can be used to retrieve the elements of seq2 + :return: A tuple (concatenated_sequence, concatenated_mask) if return_index is False, + otherwise (concatenated_sequence, concatenated_mask, index). 
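The helpers in this file rely on the right-padding convention stated above: valid tokens first, padded slots last, with `True` marking padding. A tiny, made-up illustration of `is_right_padded`:

```python
import torch

right_padded = torch.tensor([[False, False, True, True]])   # valid, valid, pad, pad
left_padded = torch.tensor([[True, False, False, False]])   # padding first -> rejected

print(is_right_padded(right_padded))   # tensor(True)
print(is_right_padded(left_padded))    # tensor(False)
```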
+ """ + seq1_length, batch_size, hidden_size = seq1.shape + seq2_length, batch_size, hidden_size = seq2.shape + + assert batch_size == seq1.size(1) == seq2.size(1) == mask1.size(0) == mask2.size(0) + assert hidden_size == seq1.size(2) == seq2.size(2) + assert seq1_length == mask1.size(1) + assert seq2_length == mask2.size(1) + + torch._assert_async(is_right_padded(mask1)) + torch._assert_async(is_right_padded(mask2)) + + actual_seq1_lengths = (~mask1).sum(dim=-1) + actual_seq2_lengths = (~mask2).sum(dim=-1) + + final_lengths = actual_seq1_lengths + actual_seq2_lengths + max_length = seq1_length + seq2_length + concatenated_mask = ( + torch.arange(max_length, device=seq2.device)[None].repeat(batch_size, 1) + >= final_lengths[:, None] + ) + + # (max_len, batch_size, hidden_size) + concatenated_sequence = torch.zeros( + (max_length, batch_size, hidden_size), device=seq2.device, dtype=seq2.dtype + ) + concatenated_sequence[:seq1_length, :, :] = seq1 + + # At this point, the element of seq1 are in the right place + # We just need to shift the elements of seq2 + + index = torch.arange(seq2_length, device=seq2.device)[:, None].repeat(1, batch_size) + index = index + actual_seq1_lengths[None] + + concatenated_sequence = concatenated_sequence.scatter( + 0, index[:, :, None].expand(-1, -1, hidden_size), seq2 + ) + + if return_index: + return concatenated_sequence, concatenated_mask, index + + return concatenated_sequence, concatenated_mask + + +class Prompt: + """Utility class to manipulate geometric prompts. + + We expect the sequences in pytorch convention, that is sequence first, batch second + The dimensions are expected as follows: + box_embeddings shape: N_boxes x B x C_box + box_mask shape: B x N_boxes. Can be None if nothing is masked out + point_embeddings shape: N_points x B x C_point + point_mask shape: B x N_points. Can be None if nothing is masked out + mask_embeddings shape: N_masks x B x 1 x H_mask x W_mask + mask_mask shape: B x N_masks. Can be None if nothing is masked out + + We also store positive/negative labels. These tensors are also stored batch-first + If they are None, we'll assume positive labels everywhere + box_labels: long tensor of shape N_boxes x B + point_labels: long tensor of shape N_points x B + mask_labels: long tensor of shape N_masks x B + """ + + def __init__( + self, + box_embeddings=None, + box_mask=None, + point_embeddings=None, + point_mask=None, + box_labels=None, + point_labels=None, + mask_embeddings=None, + mask_mask=None, # Attention mask for mask prompt + mask_labels=None, + ): + # Check for null prompt + if ( + box_embeddings is None + and point_embeddings is None + and mask_embeddings is None + ): + self.box_embeddings = None + self.box_labels = None + self.box_mask = None + self.point_embeddings = None + self.point_labels = None + self.point_mask = None + self.mask_embeddings = None + self.mask_mask = None + # Masks are assumed positive only for now. + self.mask_labels = None + return + # Get sequence lengths and device + box_seq_len, point_seq_len, mask_seq_len, bs, device = ( + self._init_seq_len_and_device( + box_embeddings, point_embeddings, mask_embeddings + ) + ) + + # Initialize embeds, labels, attention masks. 
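A worked example of `concat_padded_sequences` with toy tensors (padding slots may hold arbitrary values; only the mask marks them as padding):

```python
import torch

seq1 = torch.arange(6, dtype=torch.float32).view(3, 1, 2)    # (seq=3, bs=1, dim=2)
mask1 = torch.tensor([[False, False, True]])                  # last slot of seq1 is padding
seq2 = torch.full((2, 1, 2), 9.0)                             # (seq=2, bs=1, dim=2)
mask2 = torch.tensor([[False, False]])                        # no padding in seq2

merged, merged_mask = concat_padded_sequences(seq1, mask1, seq2, mask2)
print(merged.squeeze(1))     # two valid rows of seq1, then the two rows of seq2, then one padded row
print(merged_mask)           # tensor([[False, False, False, False,  True]])
```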
+ box_embeddings, box_labels, box_mask = self._init_box( + box_embeddings, box_labels, box_mask, box_seq_len, bs, device + ) + point_embeddings, point_labels, point_mask = self._init_point( + point_embeddings, point_labels, point_mask, point_seq_len, bs, device + ) + mask_embeddings, mask_labels, mask_mask = self._init_mask( + mask_embeddings, mask_labels, mask_mask, mask_seq_len, bs, device + ) + + # Dimension checks + assert ( + box_embeddings is not None + and list(box_embeddings.shape[:2]) + == [ + box_seq_len, + bs, + ] + ), f"Wrong dimension for box embeddings. Expected [{box_seq_len}, {bs}, *] got {box_embeddings.shape}" + assert ( + box_mask is not None + and list(box_mask.shape) + == [ + bs, + box_seq_len, + ] + ), f"Wrong dimension for box mask. Expected [{bs}, {box_seq_len}] got {box_mask.shape}" + assert ( + point_embeddings is not None + and list(point_embeddings.shape[:2]) + == [ + point_seq_len, + bs, + ] + ), f"Wrong dimension for point embeddings. Expected [{point_seq_len}, {bs}, *] got {point_embeddings.shape}" + assert ( + point_mask is not None + and list(point_mask.shape) + == [ + bs, + point_seq_len, + ] + ), f"Wrong dimension for point mask. Expected [{bs}, {point_seq_len}] got {point_mask.shape}" + assert ( + box_labels is not None + and list(box_labels.shape) + == [ + box_seq_len, + bs, + ] + ), f"Wrong dimension for box labels. Expected [{box_seq_len}, {bs}] got {box_labels.shape}" + assert ( + point_labels is not None + and list(point_labels.shape) + == [ + point_seq_len, + bs, + ] + ), f"Wrong dimension for point labels. Expected [{point_seq_len}, {bs}] got {point_labels.shape}" + assert ( + # Allowed to be None, we leave it to the encoder to check for validity before encoding. + mask_embeddings is None + or list(mask_embeddings.shape[:2]) + == [ + mask_seq_len, + bs, + ] + ), f"Wrong dimension for mask embeddings. Expected [{mask_seq_len}, {bs}, *] got {mask_embeddings.shape}" + assert ( + mask_mask is None + or list(mask_mask.shape) + == [ + bs, + mask_seq_len, + ] + ), f"Wrong dimension for mask attn. mask. Expected [{bs}, {mask_seq_len}] got {mask_mask.shape}" + + # Device checks + assert ( + box_embeddings is not None and box_embeddings.device == device + ), f"Expected box embeddings to be on device {device}, got {box_embeddings.device}" + assert ( + box_mask is not None and box_mask.device == device + ), f"Expected box mask to be on device {device}, got {box_mask.device}" + assert ( + box_labels is not None and box_labels.device == device + ), f"Expected box labels to be on device {device}, got {box_labels.device}" + assert ( + point_embeddings is not None and point_embeddings.device == device + ), f"Expected point embeddings to be on device {device}, got {point_embeddings.device}" + assert ( + point_mask is not None and point_mask.device == device + ), f"Expected point mask to be on device {device}, got {point_mask.device}" + assert ( + point_labels is not None and point_labels.device == device + ), f"Expected point labels to be on device {device}, got {point_labels.device}" + assert ( + mask_embeddings is None or mask_embeddings.device == device + ), f"Expected mask embeddings to be on device {device}, got {mask_embeddings.device}" + assert ( + mask_mask is None or mask_mask.device == device + ), f"Expected mask attn. 
mask to be on device {device}, got {mask_mask.device}" + + self.box_embeddings = box_embeddings + self.point_embeddings = point_embeddings + self.box_mask = box_mask + self.point_mask = point_mask + self.box_labels = box_labels + self.point_labels = point_labels + self.mask_embeddings = mask_embeddings + self.mask_labels = mask_labels + self.mask_mask = mask_mask + + def _init_seq_len_and_device( + self, box_embeddings, point_embeddings, mask_embeddings + ): + box_seq_len = point_seq_len = mask_seq_len = 0 + bs = None + device = None + if box_embeddings is not None: + bs = box_embeddings.shape[1] + box_seq_len = box_embeddings.shape[0] + device = box_embeddings.device + + if point_embeddings is not None: + point_seq_len = point_embeddings.shape[0] + if bs is not None: + assert ( + bs == point_embeddings.shape[1] + ), f"Batch size mismatch between box and point embeddings. Got {bs} and {point_embeddings.shape[1]}." + else: + bs = point_embeddings.shape[1] + if device is not None: + assert ( + device == point_embeddings.device + ), "Device mismatch between box and point embeddings" + else: + device = point_embeddings.device + + if mask_embeddings is not None: + mask_seq_len = mask_embeddings.shape[0] + if bs is not None: + assert ( + bs == mask_embeddings.shape[1] + ), f"Batch size mismatch between box/point and mask embedding. Got {bs} and {mask_embeddings.shape[1]}" + else: + bs = mask_embeddings.shape[1] + if device is not None: + assert ( + device == mask_embeddings.device + ), "Device mismatch between box/point and mask embeddings." + else: + device = mask_embeddings.device + + return box_seq_len, point_seq_len, mask_seq_len, bs, device + + def _init_box(self, box_embeddings, box_labels, box_mask, box_seq_len, bs, device): + if box_embeddings is None: + box_embeddings = torch.zeros(box_seq_len, bs, 4, device=device) + if box_labels is None: + box_labels = torch.ones(box_seq_len, bs, device=device, dtype=torch.long) + if box_mask is None: + box_mask = torch.zeros(bs, box_seq_len, device=device, dtype=torch.bool) + return box_embeddings, box_labels, box_mask + + def _init_point( + self, point_embeddings, point_labels, point_mask, point_seq_len, bs, device + ): + """ + Identical to _init_box. Except that C=2 for points (vs. 4 for boxes). + """ + if point_embeddings is None: + point_embeddings = torch.zeros(point_seq_len, bs, 2, device=device) + if point_labels is None: + point_labels = torch.ones( + point_seq_len, bs, device=device, dtype=torch.long + ) + if point_mask is None: + point_mask = torch.zeros(bs, point_seq_len, device=device, dtype=torch.bool) + return point_embeddings, point_labels, point_mask + + def _init_mask( + self, mask_embeddings, mask_labels, mask_mask, mask_seq_len, bs, device + ): + # NOTE: Mask embeddings can be of arbitrary resolution, so we don't initialize it here. + # In case we append new mask, we check that its resolution matches exisiting ones (if any). + # In case mask_embeddings is None, we should never encode it. 
+ if mask_labels is None: + mask_labels = torch.ones(mask_seq_len, bs, device=device, dtype=torch.long) + if mask_mask is None: + mask_mask = torch.zeros(bs, mask_seq_len, device=device, dtype=torch.bool) + return mask_embeddings, mask_labels, mask_mask + + def append_boxes(self, boxes, labels, mask=None): + if self.box_embeddings is None: + self.box_embeddings = boxes + self.box_labels = labels + self.box_mask = mask + return + + bs = self.box_embeddings.shape[1] + assert boxes.shape[1] == labels.shape[1] == bs + assert list(boxes.shape[:2]) == list(labels.shape[:2]) + if mask is None: + mask = torch.zeros( + bs, boxes.shape[0], dtype=torch.bool, device=boxes.device + ) + + self.box_labels, _ = concat_padded_sequences( + self.box_labels.unsqueeze(-1), self.box_mask, labels.unsqueeze(-1), mask + ) + self.box_labels = self.box_labels.squeeze(-1) + self.box_embeddings, self.box_mask = concat_padded_sequences( + self.box_embeddings, self.box_mask, boxes, mask + ) + + def append_points(self, points, labels, mask=None): + if self.point_embeddings is None: + self.point_embeddings = points + self.point_labels = labels + self.point_mask = mask + return + + bs = self.point_embeddings.shape[1] + assert points.shape[1] == labels.shape[1] == bs + assert list(points.shape[:2]) == list(labels.shape[:2]) + if mask is None: + mask = torch.zeros( + bs, points.shape[0], dtype=torch.bool, device=points.device + ) + + self.point_labels, _ = concat_padded_sequences( + self.point_labels.unsqueeze(-1), self.point_mask, labels.unsqueeze(-1), mask + ) + self.point_labels = self.point_labels.squeeze(-1) + self.point_embeddings, self.point_mask = concat_padded_sequences( + self.point_embeddings, self.point_mask, points, mask + ) + + def append_masks(self, masks, labels=None, attn_mask=None): + if labels is not None: + assert list(masks.shape[:2]) == list(labels.shape[:2]) + if self.mask_embeddings is None: + self.mask_embeddings = masks + mask_seq_len, bs = masks.shape[:2] + if labels is None: + self.mask_labels = torch.ones( + mask_seq_len, bs, device=masks.device, dtype=torch.long + ) + else: + self.mask_labels = labels + if attn_mask is None: + self.mask_mask = torch.zeros( + bs, mask_seq_len, device=masks.device, dtype=torch.bool + ) + else: + self.mask_mask = attn_mask + else: + raise NotImplementedError("Only one mask per prompt is supported.") + + def clone(self): + return Prompt( + box_embeddings=( + None if self.box_embeddings is None else self.box_embeddings.clone() + ), + box_mask=None if self.box_mask is None else self.box_mask.clone(), + point_embeddings=( + None if self.point_embeddings is None else self.point_embeddings.clone() + ), + point_mask=None if self.point_mask is None else self.point_mask.clone(), + box_labels=None if self.box_labels is None else self.box_labels.clone(), + point_labels=( + None if self.point_labels is None else self.point_labels.clone() + ), + ) + + +class MaskEncoder(nn.Module): + """ + Base class for mask encoders. + """ + + def __init__( + self, + mask_downsampler: nn.Module, + position_encoding: nn.Module, + ): + super().__init__() + self.mask_downsampler = mask_downsampler + self.position_encoding = position_encoding + + def forward(self, masks, *args, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]: + masks = self.mask_downsampler(masks) + masks_pos = self.position_encoding(masks).to(masks.dtype) + + return masks, masks_pos + + +class FusedMaskEncoder(MaskEncoder): + """ + Identical to memory.SimpleMaskEncoder but follows the interface of geometry_encoders.MaskEncoder. 
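To make the layout conventions of `Prompt` concrete, here is an illustrative construction for a batch of one image with two positive boxes and one negative point (normalized cxcywh boxes, normalized xy points, sequence-first tensors). None of these values come from the diff; they only exercise the constructor and `append_points`.

```python
import torch

# Two positive boxes for a batch of one image: (N_boxes=2, B=1, 4), cxcywh in [0, 1].
boxes = torch.tensor([[[0.50, 0.50, 0.20, 0.30]],
                      [[0.30, 0.40, 0.10, 0.10]]])
box_labels = torch.ones(2, 1, dtype=torch.long)          # 1 = positive

prompt = Prompt(box_embeddings=boxes, box_labels=box_labels)

# Append one negative point: (N_points=1, B=1, 2), xy in [0, 1].
point = torch.tensor([[[0.70, 0.20]]])
point_label = torch.zeros(1, 1, dtype=torch.long)        # 0 = negative
prompt.append_points(point, point_label)

print(prompt.box_embeddings.shape, prompt.point_embeddings.shape)
# torch.Size([2, 1, 4]) torch.Size([1, 1, 2])
```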
+ We also remove the `skip_mask_sigmoid` option (to be handled outside the MaskEncoder). + Fuses backbone image features with mask features. + """ + + def __init__( + self, + mask_downsampler: nn.Module, + position_encoding: nn.Module, + fuser: nn.Module, + in_dim: int = 256, + out_dim: int = 256, + ): + super().__init__(mask_downsampler, position_encoding) + self.fuser = fuser + self.out_proj = nn.Identity() + if out_dim != in_dim: + self.out_proj = nn.Conv2d(in_dim, out_dim, kernel_size=1) + self.pix_feat_proj = nn.Conv2d(in_dim, in_dim, kernel_size=1) + + @override + def forward( + self, + masks: torch.Tensor, + pix_feat: torch.Tensor, + **kwargs, + ) -> Tuple[torch.Tensor, torch.Tensor]: + masks = self.mask_downsampler(masks) + + ## Fuse pix_feats and downsampled masks + # in case the visual features are on CPU, cast them to CUDA + pix_feat = pix_feat.to(masks.device) + + x = self.pix_feat_proj(pix_feat) + x = x + masks + x = self.fuser(x) + x = self.out_proj(x) + + pos = self.position_encoding(x).to(x.dtype) + + return x, pos + + +class SequenceGeometryEncoder(nn.Module): + """ + This a fully fledged encoder for geometric prompts. + It assumes boxes are passed in the "normalized CxCyWH" format, and points in normalized xy + This allows flexibility in how to encode the features (eg do pooling) + + Points and boxes can be encoded with any of the three possibilities: + - direct projection: we just compute a linear from coordinate space to d_model + - pooling: pool features from the backbone in the requested location. + For boxes, it's a roi align + For points it's a grid sample + - pos encoder: Take the position encoding of the point or box center + + These three options are mutually compatible. If several are selected, we'll take a simple addition + + As an alternative, we offer the possibility to encode points only. + In that case, the boxes are converted to two points for the top left and bottom right corners (with appropriate labels) + + On top of these encodings, we offer the possibility to further encode the prompt sequence with a transformer. + """ + + def __init__( + self, + encode_boxes_as_points: bool, + points_direct_project: bool, + points_pool: bool, + points_pos_enc: bool, + boxes_direct_project: bool, + boxes_pool: bool, + boxes_pos_enc: bool, + d_model: int, + pos_enc, + num_layers: int, + layer: nn.Module, + roi_size: int = 7, # for boxes pool + add_cls: bool = True, + add_post_encode_proj: bool = True, + mask_encoder: MaskEncoder = None, + add_mask_label: bool = False, + use_act_ckpt: bool = False, + ): + super().__init__() + + self.d_model = d_model + self.pos_enc = pos_enc + self.encode_boxes_as_points = encode_boxes_as_points + self.roi_size = roi_size + # There usually are two labels: positive and negatives. + # If we encode boxes as points, we have 3 types of points: regular, top left, bottom right + # These 3 types can be positives or negatives, hence 2*3 = 6 labels + num_labels = 6 if self.encode_boxes_as_points else 2 + self.label_embed = torch.nn.Embedding(num_labels, self.d_model) + + # This is a cls token, can be used for pooling if need be. 
+ # It also ensures that the encoded sequences are always non-empty + self.cls_embed = None + if add_cls: + self.cls_embed = torch.nn.Embedding(1, self.d_model) + + assert ( + points_direct_project or points_pos_enc or points_pool + ), "Error: need at least one way to encode points" + assert ( + encode_boxes_as_points + or boxes_direct_project + or boxes_pos_enc + or boxes_pool + ), "Error: need at least one way to encode boxes" + + self.points_direct_project = None + if points_direct_project: + self.points_direct_project = nn.Linear(2, self.d_model) + self.points_pool_project = None + if points_pool: + self.points_pool_project = nn.Linear(self.d_model, self.d_model) + self.points_pos_enc_project = None + if points_pos_enc: + self.points_pos_enc_project = nn.Linear(self.d_model, self.d_model) + + self.boxes_direct_project = None + self.boxes_pool_project = None + self.boxes_pos_enc_project = None + if not encode_boxes_as_points: + if boxes_direct_project: + self.boxes_direct_project = nn.Linear(4, self.d_model) + if boxes_pool: + self.boxes_pool_project = nn.Conv2d( + self.d_model, self.d_model, self.roi_size + ) + if boxes_pos_enc: + self.boxes_pos_enc_project = nn.Linear(self.d_model + 2, self.d_model) + + self.final_proj = None + if add_post_encode_proj: + self.final_proj = nn.Linear(self.d_model, self.d_model) + self.norm = nn.LayerNorm(self.d_model) + + self.img_pre_norm = nn.Identity() + if self.points_pool_project is not None or self.boxes_pool_project is not None: + self.img_pre_norm = nn.LayerNorm(self.d_model) + + self.encode = None + if num_layers > 0: + assert ( + add_cls + ), "It's currently highly recommended to add a CLS when using a transformer" + self.encode = get_clones(layer, num_layers) + self.encode_norm = nn.LayerNorm(self.d_model) + + if mask_encoder is not None: + assert isinstance( + mask_encoder, MaskEncoder + ), f"Expected mask_encoder of type MaskEncoder. Got {type(mask_encoder)}." 
+ if add_mask_label: + self.mask_label_embed = torch.nn.Embedding(2, self.d_model) + self.add_mask_label = add_mask_label + self.mask_encoder = mask_encoder + self.use_act_ckpt = use_act_ckpt + + def _encode_points(self, points, points_mask, points_labels, img_feats): + points_embed = None + n_points, bs = points.shape[:2] + + if self.points_direct_project is not None: + proj = self.points_direct_project(points) + assert points_embed is None + points_embed = proj + + if self.points_pool_project is not None: + # points are [Num_points, bs, 2], normalized in [0, 1] + # the grid needs to be [Bs, H_out, W_out, 2] normalized in [-1,1] + # Will take H_out = num_points, w_out = 1 + grid = points.transpose(0, 1).unsqueeze(2) + # re normalize to [-1, 1] + grid = (grid * 2) - 1 + sampled = torch.nn.functional.grid_sample( + img_feats, grid, align_corners=False + ) + assert list(sampled.shape) == [bs, self.d_model, n_points, 1] + sampled = sampled.squeeze(-1).permute(2, 0, 1) + proj = self.points_pool_project(sampled) + if points_embed is None: + points_embed = proj + else: + points_embed = points_embed + proj + + if self.points_pos_enc_project is not None: + x, y = points.unbind(-1) + enc_x, enc_y = self.pos_enc._encode_xy(x.flatten(), y.flatten()) + enc_x = enc_x.view(n_points, bs, enc_x.shape[-1]) + enc_y = enc_y.view(n_points, bs, enc_y.shape[-1]) + enc = torch.cat([enc_x, enc_y], -1) + + proj = self.points_pos_enc_project(enc) + if points_embed is None: + points_embed = proj + else: + points_embed = points_embed + proj + + type_embed = self.label_embed(points_labels.long()) + return type_embed + points_embed, points_mask + + def _encode_boxes(self, boxes, boxes_mask, boxes_labels, img_feats): + boxes_embed = None + n_boxes, bs = boxes.shape[:2] + + if self.boxes_direct_project is not None: + proj = self.boxes_direct_project(boxes) + assert boxes_embed is None + boxes_embed = proj + + if self.boxes_pool_project is not None: + H, W = img_feats.shape[-2:] + + # boxes are [Num_boxes, bs, 4], normalized in [0, 1] + # We need to denormalize, and convert to [x, y, x, y] + boxes_xyxy = box_cxcywh_to_xyxy(boxes) + scale = torch.tensor([W, H, W, H], dtype=boxes_xyxy.dtype) + scale = scale.pin_memory().to(device=boxes_xyxy.device, non_blocking=True) + scale = scale.view(1, 1, 4) + boxes_xyxy = boxes_xyxy * scale + sampled = torchvision.ops.roi_align( + img_feats, boxes_xyxy.float().transpose(0, 1).unbind(0), self.roi_size + ) + assert list(sampled.shape) == [ + bs * n_boxes, + self.d_model, + self.roi_size, + self.roi_size, + ] + proj = self.boxes_pool_project(sampled) + proj = proj.view(bs, n_boxes, self.d_model).transpose(0, 1) + if boxes_embed is None: + boxes_embed = proj + else: + boxes_embed = boxes_embed + proj + + if self.boxes_pos_enc_project is not None: + cx, cy, w, h = boxes.unbind(-1) + enc = self.pos_enc.encode_boxes( + cx.flatten(), cy.flatten(), w.flatten(), h.flatten() + ) + enc = enc.view(boxes.shape[0], boxes.shape[1], enc.shape[-1]) + + proj = self.boxes_pos_enc_project(enc) + if boxes_embed is None: + boxes_embed = proj + else: + boxes_embed = boxes_embed + proj + + type_embed = self.label_embed(boxes_labels.long()) + return type_embed + boxes_embed, boxes_mask + + def _encode_masks( + self, + masks: torch.Tensor, + attn_mask: torch.Tensor, + mask_labels: torch.Tensor, + img_feats: torch.Tensor = None, + ): + n_masks, bs = masks.shape[:2] + assert ( + n_masks == 1 + ), "We assume one mask per prompt for now. Code should still be functional if this assertion is removed." 
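The two pooling branches above reduce to standard torchvision/PyTorch operations: points are sampled with `grid_sample` after remapping coordinates from [0, 1] to [-1, 1], and boxes are pooled with `roi_align` after denormalizing to pixel xyxy. A standalone sketch with illustrative shapes (not part of the module):

```python
import torch
import torch.nn.functional as F
import torchvision

bs, c, h, w = 2, 256, 64, 64
img_feats = torch.randn(bs, c, h, w)

# Point pooling: normalized [0, 1] xy points -> grid_sample grid in [-1, 1],
# with H_out = num_points and W_out = 1, as in _encode_points.
n_points = 5
points = torch.rand(n_points, bs, 2)                  # sequence-first
grid = points.transpose(0, 1).unsqueeze(2) * 2 - 1    # (bs, n_points, 1, 2)
point_feats = F.grid_sample(img_feats, grid, align_corners=False)
print(point_feats.shape)                              # torch.Size([2, 256, 5, 1])

# Box pooling: denormalized xyxy boxes -> roi_align with a 7x7 output window.
boxes_xyxy = torch.tensor([[0.1, 0.1, 0.4, 0.5],
                           [0.2, 0.3, 0.9, 0.8],
                           [0.0, 0.0, 1.0, 1.0]]) * torch.tensor([w, h, w, h])
rois = [boxes_xyxy, boxes_xyxy]                       # one (n_boxes, 4) tensor per image
box_feats = torchvision.ops.roi_align(img_feats, rois, output_size=7)
print(box_feats.shape)                                # torch.Size([6, 256, 7, 7])
```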
+ assert ( + list(attn_mask.shape) + == [ + bs, + n_masks, + ] + ), f"Expected attn_mask to be of shape {bs}x{n_masks}. Got {list(attn_mask.shape)}." + masks, pos = self.mask_encoder( + masks=masks.flatten(0, 1).float(), + pix_feat=img_feats, + ) + H, W = masks.shape[-2:] + n_tokens_per_mask = H * W + # NOTE: We directly add pos enc here as we usually don't keep track of pos encoding for the concatenated prompt (text, other geometric prompts). Might need to do some refactoring for more flexibility. + masks = masks + pos + masks = masks.view(n_masks, bs, *masks.shape[1:]).flatten( + -2 + ) # n_masks x bs x C x H*W + masks = masks.permute(0, 3, 1, 2).flatten(0, 1) # n_masks * H*W x bs x C + attn_mask = attn_mask.repeat_interleave(n_tokens_per_mask, dim=1) + if self.add_mask_label: + masks = masks + self.mask_label_embed(mask_labels.long()) + return masks, attn_mask + + def forward(self, geo_prompt: Prompt, img_feats, img_sizes, img_pos_embeds=None): + points = geo_prompt.point_embeddings + points_mask = geo_prompt.point_mask + points_labels = geo_prompt.point_labels + boxes = geo_prompt.box_embeddings + boxes_mask = geo_prompt.box_mask + boxes_labels = geo_prompt.box_labels + masks = geo_prompt.mask_embeddings + masks_mask = geo_prompt.mask_mask + masks_labels = geo_prompt.mask_labels + seq_first_img_feats = img_feats[-1] # [H*W, B, C] + seq_first_img_pos_embeds = ( + img_pos_embeds[-1] + if img_pos_embeds is not None + else torch.zeros_like(seq_first_img_feats) + ) + + if self.points_pool_project or self.boxes_pool_project: + assert len(img_feats) == len(img_sizes) + cur_img_feat = img_feats[-1] + cur_img_feat = self.img_pre_norm(cur_img_feat) + H, W = img_sizes[-1] + assert cur_img_feat.shape[0] == H * W + N, C = cur_img_feat.shape[-2:] + # Put back in NxCxHxW + cur_img_feat = cur_img_feat.permute(1, 2, 0) + cur_img_feat = cur_img_feat.view(N, C, H, W) + img_feats = cur_img_feat + + if self.encode_boxes_as_points: + assert boxes is not None + assert geo_prompt.box_mask is not None + assert geo_prompt.box_labels is not None + assert boxes.shape[-1] == 4 + + boxes_xyxy = box_cxcywh_to_xyxy(boxes) + top_left, bottom_right = boxes_xyxy.split(split_size=2, dim=-1) + + labels_tl = geo_prompt.box_labels + 2 + labels_br = geo_prompt.box_labels + 4 + + # Append to the existing points + points, _ = concat_padded_sequences( + points, points_mask, top_left, boxes_mask + ) + points_labels, points_mask = concat_padded_sequences( + points_labels.unsqueeze(-1), + points_mask, + labels_tl.unsqueeze(-1), + boxes_mask, + ) + points_labels = points_labels.squeeze(-1) + + points, _ = concat_padded_sequences( + points, points_mask, bottom_right, boxes_mask + ) + points_labels, points_mask = concat_padded_sequences( + points_labels.unsqueeze(-1), + points_mask, + labels_br.unsqueeze(-1), + boxes_mask, + ) + points_labels = points_labels.squeeze(-1) + + final_embeds, final_mask = self._encode_points( + points=points, + points_mask=points_mask, + points_labels=points_labels, + img_feats=img_feats, + ) + + if not self.encode_boxes_as_points: + boxes_embeds, boxes_mask = self._encode_boxes( + boxes=boxes, + boxes_mask=boxes_mask, + boxes_labels=boxes_labels, + img_feats=img_feats, + ) + + final_embeds, final_mask = concat_padded_sequences( + final_embeds, final_mask, boxes_embeds, boxes_mask + ) + + if masks is not None and self.mask_encoder is not None: + masks_embed, masks_mask = self._encode_masks( + masks=masks, + attn_mask=masks_mask, + mask_labels=masks_labels, + img_feats=img_feats, + ) + if points.size(0) == 
boxes.size(0) == 0: + return masks_embed, masks_mask + bs = final_embeds.shape[1] + assert final_mask.shape[0] == bs + if self.cls_embed is not None: + cls = self.cls_embed.weight.view(1, 1, self.d_model).repeat(1, bs, 1) + cls_mask = torch.zeros( + bs, 1, dtype=final_mask.dtype, device=final_mask.device + ) + final_embeds, final_mask = concat_padded_sequences( + final_embeds, final_mask, cls, cls_mask + ) + + if self.final_proj is not None: + final_embeds = self.norm(self.final_proj(final_embeds)) + + if self.encode is not None: + for lay in self.encode: + final_embeds = activation_ckpt_wrapper(lay)( + tgt=final_embeds, + memory=seq_first_img_feats, + tgt_key_padding_mask=final_mask, + pos=seq_first_img_pos_embeds, + act_ckpt_enable=self.training and self.use_act_ckpt, + ) + final_embeds = self.encode_norm(final_embeds) + # Finally, concat mask embeddings if any + if masks is not None and self.mask_encoder is not None: + final_embeds, final_mask = concat_padded_sequences( + final_embeds, final_mask, masks_embed, masks_mask + ) + return final_embeds, final_mask diff --git a/detect_tools/sam3/sam3/model/io_utils.py b/detect_tools/sam3/sam3/model/io_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0a225842e4dca6eac64d84f262bddde2ee29d27b --- /dev/null +++ b/detect_tools/sam3/sam3/model/io_utils.py @@ -0,0 +1,709 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import contextlib +import os +import queue +import re +import time +from threading import Condition, get_ident, Lock, Thread + +import numpy as np +import torch +import torch.nn.functional as F +import torchvision.transforms.functional as TF + +from PIL import Image + +from sam3.logger import get_logger +from tqdm import tqdm + +logger = get_logger(__name__) + +IS_MAIN_PROCESS = os.getenv("IS_MAIN_PROCESS", "1") == "1" +RANK = int(os.getenv("RANK", "0")) + +IMAGE_EXTS = [".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".webp"] +VIDEO_EXTS = [".mp4", ".mov", ".avi", ".mkv", ".webm"] + + +def load_resource_as_video_frames( + resource_path, + image_size, + offload_video_to_cpu, + img_mean=(0.5, 0.5, 0.5), + img_std=(0.5, 0.5, 0.5), + async_loading_frames=False, + video_loader_type="cv2", +): + """ + Load video frames from either a video or an image (as a single-frame video). 
+ Alternatively, if input is a list of PIL images, convert its format + """ + if isinstance(resource_path, list): + img_mean = torch.tensor(img_mean, dtype=torch.float16)[:, None, None] + img_std = torch.tensor(img_std, dtype=torch.float16)[:, None, None] + assert all(isinstance(img_pil, Image.Image) for img_pil in resource_path) + assert len(resource_path) is not None + orig_height, orig_width = resource_path[0].size + orig_height, orig_width = ( + orig_width, + orig_height, + ) # For some reason, this method returns these swapped + images = [] + for img_pil in resource_path: + img_np = np.array(img_pil.convert("RGB").resize((image_size, image_size))) + assert img_np.dtype == np.uint8, "np.uint8 is expected for JPEG images" + img_np = img_np / 255.0 + img = torch.from_numpy(img_np).permute(2, 0, 1) + # float16 precision should be sufficient for image tensor storage + img = img.to(dtype=torch.float16) + # normalize by mean and std + img -= img_mean + img /= img_std + images.append(img) + images = torch.stack(images) + if not offload_video_to_cpu: + images = images.cuda() + return images, orig_height, orig_width + + is_image = ( + isinstance(resource_path, str) + and os.path.splitext(resource_path)[-1].lower() in IMAGE_EXTS + ) + if is_image: + return load_image_as_single_frame_video( + image_path=resource_path, + image_size=image_size, + offload_video_to_cpu=offload_video_to_cpu, + img_mean=img_mean, + img_std=img_std, + ) + else: + return load_video_frames( + video_path=resource_path, + image_size=image_size, + offload_video_to_cpu=offload_video_to_cpu, + img_mean=img_mean, + img_std=img_std, + async_loading_frames=async_loading_frames, + video_loader_type=video_loader_type, + ) + + +def load_image_as_single_frame_video( + image_path, + image_size, + offload_video_to_cpu, + img_mean=(0.5, 0.5, 0.5), + img_std=(0.5, 0.5, 0.5), +): + """Load an image as a single-frame video.""" + images, image_height, image_width = _load_img_as_tensor(image_path, image_size) + images = images.unsqueeze(0).half() + + img_mean = torch.tensor(img_mean, dtype=torch.float16)[:, None, None] + img_std = torch.tensor(img_std, dtype=torch.float16)[:, None, None] + if not offload_video_to_cpu: + images = images.cuda() + img_mean = img_mean.cuda() + img_std = img_std.cuda() + # normalize by mean and std + images -= img_mean + images /= img_std + return images, image_height, image_width + + +def load_video_frames( + video_path, + image_size, + offload_video_to_cpu, + img_mean=(0.5, 0.5, 0.5), + img_std=(0.5, 0.5, 0.5), + async_loading_frames=False, + video_loader_type="cv2", +): + """ + Load the video frames from video_path. The frames are resized to image_size as in + the model and are loaded to GPU if offload_video_to_cpu=False. This is used by the demo. 
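A hedged usage sketch for `load_resource_as_video_frames`: a single image path becomes a one-frame video tensor, a video file or frame directory becomes a (T, 3, S, S) tensor, and a list of PIL images is converted directly. The file name and image size below are placeholders, not values taken from the repository.

```python
frames, orig_h, orig_w = load_resource_as_video_frames(
    resource_path="example.jpg",        # placeholder; may also be a .mp4, a frame
                                        # directory, or a list of PIL.Image objects
    image_size=1024,                    # placeholder model input resolution
    offload_video_to_cpu=True,          # keep the frame tensor on CPU
)
print(frames.shape, frames.dtype)       # torch.Size([1, 3, 1024, 1024]) torch.float16
print(orig_h, orig_w)                   # original image resolution
```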
+ """ + assert isinstance(video_path, str) + if video_path.startswith(" where N is an integer + match = re.match(r"", video_path) + num_frames = int(match.group(1)) if match else 60 + return load_dummy_video(image_size, offload_video_to_cpu, num_frames=num_frames) + elif os.path.isdir(video_path): + return load_video_frames_from_image_folder( + image_folder=video_path, + image_size=image_size, + offload_video_to_cpu=offload_video_to_cpu, + img_mean=img_mean, + img_std=img_std, + async_loading_frames=async_loading_frames, + ) + elif os.path.splitext(video_path)[-1].lower() in VIDEO_EXTS: + return load_video_frames_from_video_file( + video_path=video_path, + image_size=image_size, + offload_video_to_cpu=offload_video_to_cpu, + img_mean=img_mean, + img_std=img_std, + async_loading_frames=async_loading_frames, + video_loader_type=video_loader_type, + ) + else: + raise NotImplementedError("Only video files and image folders are supported") + + +def load_video_frames_from_image_folder( + image_folder, + image_size, + offload_video_to_cpu, + img_mean, + img_std, + async_loading_frames, +): + """ + Load the video frames from a directory of image files ("." format) + """ + frame_names = [ + p + for p in os.listdir(image_folder) + if os.path.splitext(p)[-1].lower() in IMAGE_EXTS + ] + try: + frame_names.sort(key=lambda p: int(os.path.splitext(p)[0])) + except ValueError: + # fallback to lexicographic sort if the format is not "." + logger.warning( + f'frame names are not in "." format: {frame_names[:5]=}, ' + f"falling back to lexicographic sort." + ) + frame_names.sort() + num_frames = len(frame_names) + if num_frames == 0: + raise RuntimeError(f"no images found in {image_folder}") + img_paths = [os.path.join(image_folder, frame_name) for frame_name in frame_names] + img_mean = torch.tensor(img_mean, dtype=torch.float16)[:, None, None] + img_std = torch.tensor(img_std, dtype=torch.float16)[:, None, None] + + if async_loading_frames: + lazy_images = AsyncImageFrameLoader( + img_paths, image_size, offload_video_to_cpu, img_mean, img_std + ) + return lazy_images, lazy_images.video_height, lazy_images.video_width + + # float16 precision should be sufficient for image tensor storage + images = torch.zeros(num_frames, 3, image_size, image_size, dtype=torch.float16) + video_height, video_width = None, None + for n, img_path in enumerate( + tqdm(img_paths, desc=f"frame loading (image folder) [rank={RANK}]") + ): + images[n], video_height, video_width = _load_img_as_tensor(img_path, image_size) + if not offload_video_to_cpu: + images = images.cuda() + img_mean = img_mean.cuda() + img_std = img_std.cuda() + # normalize by mean and std + images -= img_mean + images /= img_std + return images, video_height, video_width + + +def load_video_frames_from_video_file( + video_path, + image_size, + offload_video_to_cpu, + img_mean, + img_std, + async_loading_frames, + gpu_acceleration=False, + gpu_device=None, + video_loader_type="cv2", +): + """Load the video frames from a video file.""" + if video_loader_type == "cv2": + return load_video_frames_from_video_file_using_cv2( + video_path=video_path, + image_size=image_size, + img_mean=img_mean, + img_std=img_std, + offload_video_to_cpu=offload_video_to_cpu, + ) + elif video_loader_type == "torchcodec": + logger.info("Using torchcodec to load video file") + lazy_images = AsyncVideoFileLoaderWithTorchCodec( + video_path=video_path, + image_size=image_size, + offload_video_to_cpu=offload_video_to_cpu, + img_mean=img_mean, + img_std=img_std, + 
gpu_acceleration=gpu_acceleration, + gpu_device=gpu_device, + ) + # The `AsyncVideoFileLoaderWithTorchCodec` class always loads the videos asynchronously, + # so we just wait for its loading thread to finish if async_loading_frames=False. + if not async_loading_frames: + async_thread = lazy_images.thread + if async_thread is not None: + async_thread.join() + return lazy_images, lazy_images.video_height, lazy_images.video_width + else: + raise RuntimeError("video_loader_type must be either 'cv2' or 'torchcodec'") + + +def load_video_frames_from_video_file_using_cv2( + video_path: str, + image_size: int, + img_mean: tuple = (0.5, 0.5, 0.5), + img_std: tuple = (0.5, 0.5, 0.5), + offload_video_to_cpu: bool = False, +) -> torch.Tensor: + """ + Load video from path, convert to normalized tensor with specified preprocessing + + Args: + video_path: Path to video file + image_size: Target size for square frames (height and width) + img_mean: Normalization mean (RGB) + img_std: Normalization standard deviation (RGB) + + Returns: + torch.Tensor: Preprocessed video tensor in shape (T, C, H, W) with float16 dtype + """ + import cv2 # delay OpenCV import to avoid unnecessary dependency + + # Initialize video capture + cap = cv2.VideoCapture(video_path) + if not cap.isOpened(): + raise ValueError(f"Could not open video: {video_path}") + + original_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + original_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + num_frames = num_frames if num_frames > 0 else None + + frames = [] + pbar = tqdm(desc=f"frame loading (OpenCV) [rank={RANK}]", total=num_frames) + while True: + ret, frame = cap.read() + if not ret: + break + + # Convert BGR to RGB and resize + frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + frame_resized = cv2.resize( + frame_rgb, (image_size, image_size), interpolation=cv2.INTER_CUBIC + ) + frames.append(frame_resized) + pbar.update(1) + cap.release() + pbar.close() + + # Convert to tensor + frames_np = np.stack(frames, axis=0).astype(np.float32) # (T, H, W, C) + video_tensor = torch.from_numpy(frames_np).permute(0, 3, 1, 2) # (T, C, H, W) + + img_mean = torch.tensor(img_mean, dtype=torch.float16).view(1, 3, 1, 1) + img_std = torch.tensor(img_std, dtype=torch.float16).view(1, 3, 1, 1) + if not offload_video_to_cpu: + video_tensor = video_tensor.cuda() + img_mean = img_mean.cuda() + img_std = img_std.cuda() + # normalize by mean and std + video_tensor -= img_mean + video_tensor /= img_std + return video_tensor, original_height, original_width + + +def load_dummy_video(image_size, offload_video_to_cpu, num_frames=60): + """ + Load a dummy video with random frames for testing and compilation warmup purposes. + """ + video_height, video_width = 480, 640 # dummy original video sizes + images = torch.randn(num_frames, 3, image_size, image_size, dtype=torch.float16) + if not offload_video_to_cpu: + images = images.cuda() + return images, video_height, video_width + + +def _load_img_as_tensor(img_path, image_size): + """Load and resize an image and convert it into a PyTorch tensor.""" + img = Image.open(img_path).convert("RGB") + orig_width, orig_height = img.width, img.height + img = TF.resize(img, size=(image_size, image_size)) + img = TF.to_tensor(img) + return img, orig_height, orig_width + + +class AsyncImageFrameLoader: + """ + A list of video frames to be load asynchronously without blocking session start. 
+ """ + + def __init__(self, img_paths, image_size, offload_video_to_cpu, img_mean, img_std): + self.img_paths = img_paths + self.image_size = image_size + self.offload_video_to_cpu = offload_video_to_cpu + self.img_mean = img_mean + self.img_std = img_std + # items in `self._images` will be loaded asynchronously + self.images = [None] * len(img_paths) + # catch and raise any exceptions in the async loading thread + self.exception = None + # video_height and video_width be filled when loading the first image + self.video_height = None + self.video_width = None + + # load the first frame to fill video_height and video_width and also + # to cache it (since it's most likely where the user will click) + self.__getitem__(0) + + # load the rest of frames asynchronously without blocking the session start + def _load_frames(): + try: + for n in tqdm( + range(len(self.images)), + desc=f"frame loading (image folder) [rank={RANK}]", + ): + self.__getitem__(n) + except Exception as e: + self.exception = e + + self.thread = Thread(target=_load_frames, daemon=True) + self.thread.start() + + def __getitem__(self, index): + if self.exception is not None: + raise RuntimeError("Failure in frame loading thread") from self.exception + + img = self.images[index] + if img is not None: + return img + + img, video_height, video_width = _load_img_as_tensor( + self.img_paths[index], self.image_size + ) + self.video_height = video_height + self.video_width = video_width + # float16 precision should be sufficient for image tensor storage + img = img.to(dtype=torch.float16) + # normalize by mean and std + img -= self.img_mean + img /= self.img_std + if not self.offload_video_to_cpu: + img = img.cuda() + self.images[index] = img + return img + + def __len__(self): + return len(self.images) + + +class TorchCodecDecoder: + """ + A wrapper to support GPU device and num_threads in TorchCodec decoder, + which are not supported by `torchcodec.decoders.SimpleVideoDecoder` yet. 
+ """ + + def __init__(self, source, dimension_order="NCHW", device="cpu", num_threads=1): + from torchcodec import _core as core + + self._source = source # hold a reference to the source to prevent it from GC + if isinstance(source, str): + self._decoder = core.create_from_file(source, "exact") + elif isinstance(source, bytes): + self._decoder = core.create_from_bytes(source, "exact") + else: + raise TypeError(f"Unknown source type: {type(source)}.") + assert dimension_order in ("NCHW", "NHWC") + + device_string = str(device) + core.scan_all_streams_to_update_metadata(self._decoder) + core.add_video_stream( + self._decoder, + dimension_order=dimension_order, + device=device_string, + num_threads=(1 if "cuda" in device_string else num_threads), + ) + video_metadata = core.get_container_metadata(self._decoder) + best_stream_index = video_metadata.best_video_stream_index + assert best_stream_index is not None + self.metadata = video_metadata.streams[best_stream_index] + assert self.metadata.num_frames_from_content is not None + self._num_frames = self.metadata.num_frames_from_content + + def __len__(self) -> int: + return self._num_frames + + def __getitem__(self, key: int): + from torchcodec import _core as core + + if key < 0: + key += self._num_frames + if key >= self._num_frames or key < 0: + raise IndexError( + f"Index {key} is out of bounds; length is {self._num_frames}" + ) + frame_data, *_ = core.get_frame_at_index( + self._decoder, + frame_index=key, + ) + return frame_data + + +class FIFOLock: + """A lock that ensures FIFO ordering of lock acquisitions.""" + + def __init__(self): + self._lock = Lock() + self._waiters = queue.Queue() + self._condition = Condition() + + def acquire(self): + ident = get_ident() + with self._condition: + self._waiters.put(ident) + while self._waiters.queue[0] != ident or not self._lock.acquire( + blocking=False + ): + self._condition.wait() + # got the lock and it's our turn + + def release(self): + with self._condition: + self._lock.release() + self._waiters.get() + self._condition.notify_all() + + def __enter__(self): + self.acquire() + + def __exit__(self, t, v, tb): + self.release() + + +class AsyncVideoFileLoaderWithTorchCodec: + """ + Loading frames from video files asynchronously without blocking session start. + + Unlike `AsyncVideoFileLoader`, this class uses PyTorch's offical TorchCodec library + for video decoding, which is more efficient and supports more video formats. 
+ """ + + def __init__( + self, + video_path, + image_size, + offload_video_to_cpu, + img_mean, + img_std, + gpu_acceleration=True, + gpu_device=None, + use_rand_seek_in_loading=False, + ): + # Check and possibly infer the output device (and also get its GPU id when applicable) + assert gpu_device is None or gpu_device.type == "cuda" + gpu_id = ( + gpu_device.index + if gpu_device is not None and gpu_device.index is not None + else torch.cuda.current_device() + ) + if offload_video_to_cpu: + out_device = torch.device("cpu") + else: + out_device = torch.device("cuda") if gpu_device is None else gpu_device + self.out_device = out_device + self.gpu_acceleration = gpu_acceleration + self.gpu_id = gpu_id + self.image_size = image_size + self.offload_video_to_cpu = offload_video_to_cpu + if not isinstance(img_mean, torch.Tensor): + img_mean = torch.tensor(img_mean, dtype=torch.float16)[:, None, None] + self.img_mean = img_mean + if not isinstance(img_std, torch.Tensor): + img_std = torch.tensor(img_std, dtype=torch.float16)[:, None, None] + self.img_std = img_std + + if gpu_acceleration: + self.img_mean = self.img_mean.to(f"cuda:{self.gpu_id}") + self.img_std = self.img_std.to(f"cuda:{self.gpu_id}") + decoder_option = {"device": f"cuda:{self.gpu_id}"} + else: + self.img_mean = self.img_mean.cpu() + self.img_std = self.img_std.cpu() + decoder_option = {"num_threads": 1} # use a single thread to save memory + + self.rank = int(os.environ.get("RANK", "0")) + self.world_size = int(os.environ.get("WORLD_SIZE", "1")) + self.async_reader = TorchCodecDecoder(video_path, **decoder_option) + + # `num_frames_from_content` is the true number of frames in the video content + # from the scan operation (rather than from the metadata, which could be wrong) + self.num_frames = self.async_reader.metadata.num_frames_from_content + self.video_height = self.async_reader.metadata.height + self.video_width = self.async_reader.metadata.width + + # items in `self._images` will be loaded asynchronously + self.images_loaded = [False] * self.num_frames + self.images = torch.zeros( + self.num_frames, + 3, + self.image_size, + self.image_size, + dtype=torch.float16, + device=self.out_device, + ) + # catch and raise any exceptions in the async loading thread + self.exception = None + self.use_rand_seek_in_loading = use_rand_seek_in_loading + self.rand_seek_idx_queue = queue.Queue() + # use a lock to avoid race condition between concurrent access to torchcodec + # libs (which are not thread-safe); the lock is replaced with a nullcontext + # when the video is fully loaded + self.torchcodec_access_lock = FIFOLock() + self._start_video_loading() + + def _load_one_frame(self, idx): + frame_resized = self._transform_frame(self.async_reader[idx]) + return frame_resized + + @torch.inference_mode() + def _start_video_loading(self): + desc = f"frame loading (TorchCodec w/ {'GPU' if self.gpu_acceleration else 'CPU'}) [rank={RANK}]" + pbar = tqdm(desc=desc, total=self.num_frames) + self.num_loaded_frames = 0 + # load the first frame synchronously to cache it before the session is opened + idx = self.num_loaded_frames + self.images[idx] = self._load_one_frame(idx) + self.images_loaded[idx] = True + self.num_loaded_frames += 1 + pbar.update(n=1) + self.all_frames_loaded = self.num_loaded_frames == self.num_frames + + # load the frames asynchronously without blocking the session start + def _load_frames(): + finished = self.all_frames_loaded + chunk_size = 16 + while not finished: + # asynchronously load `chunk_size` frames each time we 
acquire the lock + with self.torchcodec_access_lock, torch.inference_mode(): + for _ in range(chunk_size): + try: + idx = self.num_loaded_frames + self.images[idx] = self._load_one_frame(idx) + self.images_loaded[idx] = True + self.num_loaded_frames += 1 + pbar.update(n=1) + if self.num_loaded_frames >= self.num_frames: + finished = True + break + except Exception as e: + self.exception = e + raise + + # also read the frame that is being randomly seeked to + while True: + try: + idx = self.rand_seek_idx_queue.get_nowait() + if not self.images_loaded[idx]: + self.images[idx] = self._load_one_frame(idx) + self.images_loaded[idx] = True + except queue.Empty: + break + except Exception as e: + self.exception = e + raise + + # finished -- check whether we have loaded the total number of frames + if self.num_loaded_frames != self.num_frames: + raise RuntimeError( + f"There are {self.num_frames} frames in the video, but only " + f"{self.num_loaded_frames} frames can be loaded successfully." + ) + else: + self.all_frames_loaded = True + pbar.close() + with self.torchcodec_access_lock: + import gc + + # all frames have been loaded, so we can release the readers and free their memory + # also remove pbar and thread (which shouldn't be a part of session saving) + reader = self.async_reader + if reader is not None: + reader._source = None + self.async_reader = None + self.pbar = None + self.thread = None + self.rand_seek_idx_queue = None + gc.collect() + # remove the lock (replace it with nullcontext) when the video is fully loaded + self.torchcodec_access_lock = contextlib.nullcontext() + + self.thread = Thread(target=_load_frames, daemon=True) + self.thread.start() + + def _transform_frame(self, frame): + frame = frame.clone() # make a copy to avoid modifying the original frame bytes + frame = frame.float() # convert to float32 before interpolation + frame_resized = F.interpolate( + frame[None, :], + size=(self.image_size, self.image_size), + mode="bicubic", + align_corners=False, + )[0] + # float16 precision should be sufficient for image tensor storage + frame_resized = frame_resized.half() # uint8 -> float16 + frame_resized /= 255 + frame_resized -= self.img_mean + frame_resized /= self.img_std + if self.offload_video_to_cpu: + frame_resized = frame_resized.cpu() + elif frame_resized.device != self.out_device: + frame_resized = frame_resized.to(device=self.out_device, non_blocking=True) + return frame_resized + + def __getitem__(self, index): + if self.exception is not None: + raise RuntimeError("Failure in frame loading thread") from self.exception + + max_tries = 1200 + for _ in range(max_tries): + # use a lock to avoid race condition between concurrent access to torchcodec + # libs (which are not thread-safe); the lock is replaced with a nullcontext + # when the video is fully loaded + with self.torchcodec_access_lock: + if self.images_loaded[index]: + return self.images[index] + + if self.use_rand_seek_in_loading: + # async loading hasn't reached this frame yet, so we load this frame individually + # (it will be loaded by in _load_frames thread and added to self.images[index]) + self.rand_seek_idx_queue.put(index) + + time.sleep(0.1) + + raise RuntimeError(f"Failed to load frame {index} after {max_tries} tries") + + def __len__(self): + return len(self.images) + + def __getstate__(self): + """ + Remove a few attributes during pickling, so that this async video loader can be + saved and loaded as a part of the model session. 
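+
+        A minimal sketch of the intended round trip (`loader` is an
+        AsyncVideoFileLoaderWithTorchCodec instance):
+
+            import pickle
+            blob = pickle.dumps(loader)    # joins the loading thread, drops the decoder
+            loader2 = pickle.loads(blob)   # frames restored; no torchcodec reader attached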
+ """ + # wait for async video loading to finish before pickling + async_thread = self.thread + if async_thread is not None: + async_thread.join() + # release a few objects that cannot be pickled + reader = self.async_reader + if reader is not None: + reader._source = None + self.async_reader = None + self.pbar = None + self.thread = None + self.rand_seek_idx_queue = None + self.torchcodec_access_lock = contextlib.nullcontext() + return self.__dict__.copy() diff --git a/detect_tools/sam3/sam3/model/maskformer_segmentation.py b/detect_tools/sam3/sam3/model/maskformer_segmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..e1f5ae8f2f1e3626f78a2752f540424f0c92aab2 --- /dev/null +++ b/detect_tools/sam3/sam3/model/maskformer_segmentation.py @@ -0,0 +1,323 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import math +from typing import Dict, List, Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint + +from .model_misc import MLP + + +class LinearPresenceHead(nn.Sequential): + def __init__(self, d_model): + # a hack to make `LinearPresenceHead` compatible with old checkpoints + super().__init__(nn.Identity(), nn.Identity(), nn.Linear(d_model, 1)) + + def forward(self, hs, prompt, prompt_mask): + return super().forward(hs) + + +class MaskPredictor(nn.Module): + def __init__(self, hidden_dim, mask_dim): + super().__init__() + self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3) + + def forward(self, obj_queries, pixel_embed): + if len(obj_queries.shape) == 3: + if pixel_embed.ndim == 3: + # batch size was omitted + mask_preds = torch.einsum( + "bqc,chw->bqhw", self.mask_embed(obj_queries), pixel_embed + ) + else: + mask_preds = torch.einsum( + "bqc,bchw->bqhw", self.mask_embed(obj_queries), pixel_embed + ) + else: + # Assumed to have aux masks + if pixel_embed.ndim == 3: + # batch size was omitted + mask_preds = torch.einsum( + "lbqc,chw->lbqhw", self.mask_embed(obj_queries), pixel_embed + ) + else: + mask_preds = torch.einsum( + "lbqc,bchw->lbqhw", self.mask_embed(obj_queries), pixel_embed + ) + + return mask_preds + + +class SegmentationHead(nn.Module): + def __init__( + self, + hidden_dim, + upsampling_stages, + use_encoder_inputs=False, + aux_masks=False, + no_dec=False, + pixel_decoder=None, + act_ckpt=False, + shared_conv=False, + compile_mode_pixel_decoder=None, + ): + super().__init__() + self.use_encoder_inputs = use_encoder_inputs + self.aux_masks = aux_masks + if pixel_decoder is not None: + self.pixel_decoder = pixel_decoder + else: + self.pixel_decoder = PixelDecoder( + hidden_dim, + upsampling_stages, + shared_conv=shared_conv, + compile_mode=compile_mode_pixel_decoder, + ) + self.no_dec = no_dec + if no_dec: + self.mask_predictor = nn.Conv2d( + hidden_dim, 1, kernel_size=3, stride=1, padding=1 + ) + else: + self.mask_predictor = MaskPredictor(hidden_dim, mask_dim=hidden_dim) + + self.act_ckpt = act_ckpt + + # used to update the output dictionary + self.instance_keys = ["pred_masks"] + + @property + def device(self): + self._device = getattr(self, "_device", None) or next(self.parameters()).device + return self._device + + def to(self, *args, **kwargs): + # clear cached _device in case the model is moved to a different device + self._device = None + return super().to(*args, **kwargs) + + def _embed_pixels( + self, + backbone_feats: List[torch.Tensor], + image_ids, + encoder_hidden_states, + ) -> torch.Tensor: + feature_device = 
backbone_feats[0].device # features could be on CPU + model_device = self.device + image_ids_ = image_ids.to(feature_device) + if self.use_encoder_inputs: + if backbone_feats[0].shape[0] > 1: + # For bs > 1, we construct the per query backbone features + backbone_visual_feats = [] + for feat in backbone_feats: + # Copy the img features per query (pixel decoder won't share img feats) + backbone_visual_feats.append(feat[image_ids_, ...].to(model_device)) + else: + # Bs=1, we rely on broadcasting for query-based processing + backbone_visual_feats = [bb_feat.clone() for bb_feat in backbone_feats] + # Extract visual embeddings + encoder_hidden_states = encoder_hidden_states.permute(1, 2, 0) + spatial_dim = math.prod(backbone_feats[-1].shape[-2:]) + encoder_visual_embed = encoder_hidden_states[..., :spatial_dim].reshape( + -1, *backbone_feats[-1].shape[1:] + ) + + backbone_visual_feats[-1] = encoder_visual_embed + if self.act_ckpt: + pixel_embed = checkpoint.checkpoint( + self.pixel_decoder, backbone_visual_feats, use_reentrant=False + ) + else: + pixel_embed = self.pixel_decoder(backbone_visual_feats) + else: + backbone_feats = [x.to(model_device) for x in backbone_feats] + pixel_embed = self.pixel_decoder(backbone_feats) + if pixel_embed.shape[0] == 1: + # For batch_size=1 training, we can avoid the indexing to save memory + pixel_embed = pixel_embed.squeeze(0) + else: + pixel_embed = pixel_embed[image_ids, ...] + return pixel_embed + + def forward( + self, + backbone_feats: List[torch.Tensor], + obj_queries: torch.Tensor, + image_ids, + encoder_hidden_states: Optional[torch.Tensor] = None, + **kwargs, + ) -> Dict[str, torch.Tensor]: + if self.use_encoder_inputs: + assert encoder_hidden_states is not None + + pixel_embed = self._embed_pixels( + backbone_feats=backbone_feats, + image_ids=image_ids, + encoder_hidden_states=encoder_hidden_states, + ) + + if self.no_dec: + mask_pred = self.mask_predictor(pixel_embed) + elif self.aux_masks: + mask_pred = self.mask_predictor(obj_queries, pixel_embed) + else: + mask_pred = self.mask_predictor(obj_queries[-1], pixel_embed) + + return {"pred_masks": mask_pred} + + +class PixelDecoder(nn.Module): + def __init__( + self, + hidden_dim, + num_upsampling_stages, + interpolation_mode="nearest", + shared_conv=False, + compile_mode=None, + ): + super().__init__() + self.hidden_dim = hidden_dim + self.num_upsampling_stages = num_upsampling_stages + self.interpolation_mode = interpolation_mode + conv_layers = [] + norms = [] + num_convs = 1 if shared_conv else num_upsampling_stages + for _ in range(num_convs): + conv_layers.append(nn.Conv2d(self.hidden_dim, self.hidden_dim, 3, 1, 1)) + norms.append(nn.GroupNorm(8, self.hidden_dim)) + + self.conv_layers = nn.ModuleList(conv_layers) + self.norms = nn.ModuleList(norms) + self.shared_conv = shared_conv + self.out_dim = self.conv_layers[-1].out_channels + if compile_mode is not None: + self.forward = torch.compile( + self.forward, mode=compile_mode, dynamic=True, fullgraph=True + ) + # Needed to make checkpointing happy. But we don't know if the module is checkpointed, so we disable it by default. 
+ torch._dynamo.config.optimize_ddp = False + + def forward(self, backbone_feats: List[torch.Tensor]): + # Assumes backbone features are already projected (C == hidden dim) + + prev_fpn = backbone_feats[-1] + fpn_feats = backbone_feats[:-1] + for layer_idx, bb_feat in enumerate(fpn_feats[::-1]): + curr_fpn = bb_feat + prev_fpn = curr_fpn + F.interpolate( + prev_fpn, size=curr_fpn.shape[-2:], mode=self.interpolation_mode + ) + if self.shared_conv: + # only one conv layer + layer_idx = 0 + prev_fpn = self.conv_layers[layer_idx](prev_fpn) + prev_fpn = F.relu(self.norms[layer_idx](prev_fpn)) + + return prev_fpn + + +class UniversalSegmentationHead(SegmentationHead): + """This module handles semantic+instance segmentation""" + + def __init__( + self, + hidden_dim, + upsampling_stages, + pixel_decoder, + aux_masks=False, + no_dec=False, + act_ckpt=False, + presence_head: bool = False, + dot_product_scorer=None, + cross_attend_prompt=None, + ): + super().__init__( + hidden_dim=hidden_dim, + upsampling_stages=upsampling_stages, + use_encoder_inputs=True, + aux_masks=aux_masks, + no_dec=no_dec, + pixel_decoder=pixel_decoder, + act_ckpt=act_ckpt, + ) + self.d_model = hidden_dim + + if dot_product_scorer is not None: + assert presence_head, "Specifying a dot product scorer without a presence head is likely a mistake" + + self.presence_head = None + if presence_head: + self.presence_head = ( + dot_product_scorer + if dot_product_scorer is not None + else LinearPresenceHead(self.d_model) + ) + + self.cross_attend_prompt = cross_attend_prompt + if self.cross_attend_prompt is not None: + self.cross_attn_norm = nn.LayerNorm(self.d_model) + + self.semantic_seg_head = nn.Conv2d(self.pixel_decoder.out_dim, 1, kernel_size=1) + self.instance_seg_head = nn.Conv2d( + self.pixel_decoder.out_dim, self.d_model, kernel_size=1 + ) + + def forward( + self, + backbone_feats: List[torch.Tensor], + obj_queries: torch.Tensor, + image_ids, + encoder_hidden_states: Optional[torch.Tensor] = None, + prompt: Optional[torch.Tensor] = None, + prompt_mask: Optional[torch.Tensor] = None, + **kwargs, + ) -> Dict[str, Optional[torch.Tensor]]: + assert encoder_hidden_states is not None + bs = encoder_hidden_states.shape[1] + + if self.cross_attend_prompt is not None: + tgt2 = self.cross_attn_norm(encoder_hidden_states) + tgt2 = self.cross_attend_prompt( + query=tgt2, + key=prompt, + value=prompt, + key_padding_mask=prompt_mask, + )[0] + encoder_hidden_states = tgt2 + encoder_hidden_states + + presence_logit = None + if self.presence_head is not None: + pooled_enc = encoder_hidden_states.mean(0) + presence_logit = ( + self.presence_head( + pooled_enc.view(1, bs, 1, self.d_model), + prompt=prompt, + prompt_mask=prompt_mask, + ) + .squeeze(0) + .squeeze(1) + ) + + pixel_embed = self._embed_pixels( + backbone_feats=backbone_feats, + image_ids=image_ids, + encoder_hidden_states=encoder_hidden_states, + ) + + instance_embeds = self.instance_seg_head(pixel_embed) + + if self.no_dec: + mask_pred = self.mask_predictor(instance_embeds) + elif self.aux_masks: + mask_pred = self.mask_predictor(obj_queries, instance_embeds) + else: + mask_pred = self.mask_predictor(obj_queries[-1], instance_embeds) + + return { + "pred_masks": mask_pred, + "semantic_seg": self.semantic_seg_head(pixel_embed), + "presence_logit": presence_logit, + } diff --git a/detect_tools/sam3/sam3/model/memory.py b/detect_tools/sam3/sam3/model/memory.py new file mode 100644 index 0000000000000000000000000000000000000000..bfde5487d85006ab1aa35044fd431260dff2870e --- /dev/null 
+++ b/detect_tools/sam3/sam3/model/memory.py @@ -0,0 +1,201 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import math +from typing import Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + +try: + from timm.layers import DropPath +except ModuleNotFoundError: + # compatibility for older timm versions + from timm.models.layers import DropPath + +from .model_misc import get_clones, LayerNorm2d + + +class SimpleMaskDownSampler(nn.Module): + """ + Progressively downsample a mask by total_stride, each time by stride. + Note that LayerNorm is applied per *token*, like in ViT. + + With each downsample (by a factor stride**2), channel capacity increases by the same factor. + In the end, we linearly project to embed_dim channels. + """ + + def __init__( + self, + embed_dim=256, + kernel_size=4, + stride=4, + padding=0, + total_stride=16, + activation=nn.GELU, + # Option to interpolate the input mask first before downsampling using convs. In that case, the total_stride is assumed to be after interpolation. + # If set to input resolution or None, we don't interpolate. We default to None to be safe (for older configs or if not explicitly set) + interpol_size=None, + ): + super().__init__() + num_layers = int(math.log2(total_stride) // math.log2(stride)) + assert stride**num_layers == total_stride + self.encoder = nn.Sequential() + mask_in_chans, mask_out_chans = 1, 1 + for _ in range(num_layers): + mask_out_chans = mask_in_chans * (stride**2) + self.encoder.append( + nn.Conv2d( + mask_in_chans, + mask_out_chans, + kernel_size=kernel_size, + stride=stride, + padding=padding, + ) + ) + self.encoder.append(LayerNorm2d(mask_out_chans)) + self.encoder.append(activation()) + mask_in_chans = mask_out_chans + + self.encoder.append(nn.Conv2d(mask_out_chans, embed_dim, kernel_size=1)) + self.interpol_size = interpol_size + if self.interpol_size is not None: + assert isinstance( + self.interpol_size, (list, tuple) + ), f"Unsupported type {type(self.interpol_size)}. Should be a list or tuple." + self.interpol_size = list(interpol_size) + assert len(self.interpol_size) == 2 + + def forward(self, x: torch.Tensor): + if self.interpol_size is not None and self.interpol_size != list(x.shape[-2:]): + x = F.interpolate( + x.float(), + size=self.interpol_size, + align_corners=False, + mode="bilinear", + antialias=True, + ) + return self.encoder(x) + + +# Lightly adapted from ConvNext (https://github.com/facebookresearch/ConvNeXt) +class CXBlock(nn.Module): + r"""ConvNeXt Block. There are two equivalent implementations: + (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W) + (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back + We use (2) as we find it slightly faster in PyTorch + + Args: + dim (int): Number of input channels. + drop_path (float): Stochastic depth rate. Default: 0.0 + layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6. 
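+
+    A minimal usage sketch (shapes are illustrative):
+
+        import torch
+        block = CXBlock(dim=256)
+        x = torch.randn(2, 256, 64, 64)
+        y = block(x)   # same (N, C, H, W) shape; residual ConvNeXt transformation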
+ """ + + def __init__( + self, + dim, + kernel_size=7, + padding=3, + drop_path=0.0, + layer_scale_init_value=1e-6, + use_dwconv=True, + ): + super().__init__() + self.dwconv = nn.Conv2d( + dim, + dim, + kernel_size=kernel_size, + padding=padding, + groups=dim if use_dwconv else 1, + ) # depthwise conv + self.norm = LayerNorm2d(dim, eps=1e-6) + self.pwconv1 = nn.Linear( + dim, 4 * dim + ) # pointwise/1x1 convs, implemented with linear layers + self.act = nn.GELU() + self.pwconv2 = nn.Linear(4 * dim, dim) + self.gamma = ( + nn.Parameter(layer_scale_init_value * torch.ones((dim)), requires_grad=True) + if layer_scale_init_value > 0 + else None + ) + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + def forward(self, x): + input = x + x = self.dwconv(x) + x = self.norm(x) + x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) + x = self.pwconv1(x) + x = self.act(x) + x = self.pwconv2(x) + if self.gamma is not None: + x = self.gamma * x + x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) + + x = input + self.drop_path(x) + return x + + +class SimpleFuser(nn.Module): + def __init__(self, layer, num_layers, dim=None, input_projection=False): + super().__init__() + self.proj = nn.Identity() + self.layers = get_clones(layer, num_layers) + + if input_projection: + assert dim is not None + self.proj = nn.Conv2d(dim, dim, kernel_size=1) + + def forward(self, x): + # normally x: (N, C, H, W) + x = self.proj(x) + for layer in self.layers: + x = layer(x) + return x + + +class SimpleMaskEncoder(nn.Module): + def __init__( + self, + out_dim, + mask_downsampler, + fuser, + position_encoding, + in_dim=256, # in_dim of pix_feats + ): + super().__init__() + + self.mask_downsampler = mask_downsampler + + self.pix_feat_proj = nn.Conv2d(in_dim, in_dim, kernel_size=1) + self.fuser = fuser + self.position_encoding = position_encoding + self.out_proj = nn.Identity() + if out_dim != in_dim: + self.out_proj = nn.Conv2d(in_dim, out_dim, kernel_size=1) + + def forward( + self, + pix_feat: torch.Tensor, + masks: torch.Tensor, + skip_mask_sigmoid: bool = False, + ) -> Tuple[torch.Tensor, torch.Tensor]: + ## Process masks + # sigmoid, so that less domain shift from gt masks which are bool + if not skip_mask_sigmoid: + masks = F.sigmoid(masks) + masks = self.mask_downsampler(masks) + + ## Fuse pix_feats and downsampled masks + # in case the visual features are on CPU, cast them to CUDA + pix_feat = pix_feat.to(masks.device) + + x = self.pix_feat_proj(pix_feat) + x = x + masks + x = self.fuser(x) + x = self.out_proj(x) + + pos = self.position_encoding(x).to(x.dtype) + + return {"vision_features": x, "vision_pos_enc": [pos]} diff --git a/detect_tools/sam3/sam3/model/model_misc.py b/detect_tools/sam3/sam3/model/model_misc.py new file mode 100644 index 0000000000000000000000000000000000000000..2cb44b3fc4e422f087d923f5449990fc86c60b39 --- /dev/null +++ b/detect_tools/sam3/sam3/model/model_misc.py @@ -0,0 +1,428 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +"""Various utility models""" + +import copy +import math +import weakref +from collections.abc import Iterator +from contextlib import AbstractContextManager +from enum import auto, Enum +from typing import Dict, List, Optional, Union + +import numpy as np +import torch +import torch.nn.functional as F +from torch import nn, Tensor +from typing_extensions import override + + +def inverse_sigmoid(x, eps=1e-3): + """ + The inverse function for sigmoid activation function. 
+ Note: It might face numberical issues with fp16 small eps. + """ + x = x.clamp(min=0, max=1) + x1 = x.clamp(min=eps) + x2 = (1 - x).clamp(min=eps) + return torch.log(x1 / x2) + + +class MultiheadAttentionWrapper(nn.MultiheadAttention): + def forward(self, *args, **kwargs): + kwargs["need_weights"] = False + return super().forward(*args, **kwargs) + + +class DotProductScoring(torch.nn.Module): + def __init__( + self, + d_model, + d_proj, + prompt_mlp=None, + clamp_logits=True, + clamp_max_val=12.0, + ): + super().__init__() + self.d_proj = d_proj + assert isinstance(prompt_mlp, torch.nn.Module) or prompt_mlp is None + self.prompt_mlp = prompt_mlp # an optional MLP projection for prompt + self.prompt_proj = torch.nn.Linear(d_model, d_proj) + self.hs_proj = torch.nn.Linear(d_model, d_proj) + self.scale = float(1.0 / np.sqrt(d_proj)) + self.clamp_logits = clamp_logits + if self.clamp_logits: + self.clamp_max_val = clamp_max_val + + def mean_pool_text(self, prompt, prompt_mask): + # is_valid has shape (seq, bs, 1), where 1 is valid and 0 is padding + is_valid = (~prompt_mask).float().permute(1, 0)[..., None] + # num_valid has shape (bs, 1) + num_valid = torch.clamp(torch.sum(is_valid, dim=0), min=1.0) + # mean pool over all the valid tokens -- pooled_prompt has shape (bs, proj_dim) + pooled_prompt = (prompt * is_valid).sum(dim=0) / num_valid + return pooled_prompt + + def forward(self, hs, prompt, prompt_mask): + # hs has shape (num_layer, bs, num_query, d_model) + # prompt has shape (seq, bs, d_model) + # prompt_mask has shape (bs, seq), where 1 is valid and 0 is padding + assert hs.dim() == 4 and prompt.dim() == 3 and prompt_mask.dim() == 2 + + # apply MLP on prompt if specified + if self.prompt_mlp is not None: + prompt = self.prompt_mlp(prompt) + + # first, get the mean-pooled version of the prompt + pooled_prompt = self.mean_pool_text(prompt, prompt_mask) + + # then, project pooled_prompt and hs to d_proj dimensions + proj_pooled_prompt = self.prompt_proj(pooled_prompt) # (bs, d_proj) + proj_hs = self.hs_proj(hs) # (num_layer, bs, num_query, d_proj) + + # finally, get dot-product scores of shape (num_layer, bs, num_query, 1) + scores = torch.matmul(proj_hs, proj_pooled_prompt.unsqueeze(-1)) + scores *= self.scale + + # clamp scores to a max value to avoid numerical issues in loss or matcher + if self.clamp_logits: + scores.clamp_(min=-self.clamp_max_val, max=self.clamp_max_val) + + return scores + + +class LayerScale(nn.Module): + def __init__( + self, + dim: int, + init_values: Union[float, Tensor] = 1e-5, + inplace: bool = False, + ) -> None: + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x: Tensor) -> Tensor: + return x.mul_(self.gamma) if self.inplace else x * self.gamma + + +class LayerNorm2d(nn.Module): + def __init__(self, num_channels: int, eps: float = 1e-6) -> None: + super().__init__() + self.weight = nn.Parameter(torch.ones(num_channels)) + self.bias = nn.Parameter(torch.zeros(num_channels)) + self.eps = eps + + def forward(self, x: torch.Tensor) -> torch.Tensor: + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x + + +class TransformerWrapper(nn.Module): + def __init__( + self, + encoder, + decoder, + d_model: int, + two_stage_type="none", # ["none"] only for now + pos_enc_at_input_dec=True, + ): + super().__init__() + + self.encoder = encoder + self.decoder = 
decoder + self.num_queries = decoder.num_queries if decoder is not None else None + self.pos_enc_at_input_dec = pos_enc_at_input_dec + + # for two stage + assert two_stage_type in ["none"], "unknown param {} of two_stage_type".format( + two_stage_type + ) + self.two_stage_type = two_stage_type + + self._reset_parameters() + self.d_model = d_model + + def _reset_parameters(self): + for n, p in self.named_parameters(): + if p.dim() > 1: + if ( + "box_embed" not in n + and "query_embed" not in n + and "reference_points" not in n + ): + nn.init.xavier_uniform_(p) + + +class MLP(nn.Module): + """Very simple multi-layer perceptron (also called FFN)""" + + def __init__( + self, + input_dim: int, + hidden_dim: int, + output_dim: int, + num_layers: int, + dropout: float = 0.0, + residual: bool = False, + out_norm: Optional[nn.Module] = None, + ): + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList( + nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]) + ) + self.drop = nn.Dropout(dropout) if dropout > 0 else nn.Identity() + # whether to add the output as a residual connection to the input + if residual and input_dim != output_dim: + raise ValueError("residual is only supported if input_dim == output_dim") + self.residual = residual + # whether to apply a normalization layer to the output + assert isinstance(out_norm, nn.Module) or out_norm is None + self.out_norm = out_norm or nn.Identity() + + def forward(self, x): + orig_x = x + for i, layer in enumerate(self.layers): + x = self.drop(F.relu(layer(x))) if i < self.num_layers - 1 else layer(x) + if self.residual: + x = x + orig_x + x = self.out_norm(x) + return x + + +def get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + +def get_clones_seq(module, N): + return nn.Sequential(*[copy.deepcopy(module) for i in range(N)]) + + +def get_activation_fn(activation): + """Return an activation function given a string""" + if activation == "relu": + return F.relu + if activation == "gelu": + return F.gelu + if activation == "glu": + return F.glu + raise RuntimeError(f"activation should be relu/gelu, not {activation}.") + + +def get_activation_module(activation): + """Return an activation function given a string""" + if activation == "relu": + return nn.ReLU + if activation == "gelu": + return nn.GELU + if activation == "glu": + return nn.GLU + raise RuntimeError(f"activation should be relu/gelu, not {activation}.") + + +def get_valid_ratio(mask): + _, H, W = mask.shape + valid_H = torch.sum(~mask[:, :, 0], 1) + valid_W = torch.sum(~mask[:, 0, :], 1) + valid_ratio_h = valid_H.float() / H + valid_ratio_w = valid_W.float() / W + valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1) + return valid_ratio + + +def gen_sineembed_for_position(pos_tensor, num_feats=256): + assert num_feats % 2 == 0 + num_feats = num_feats // 2 + # n_query, bs, _ = pos_tensor.size() + # sineembed_tensor = torch.zeros(n_query, bs, 256) + scale = 2 * math.pi + dim_t = torch.arange(num_feats, dtype=torch.float32, device=pos_tensor.device) + dim_t = 10000 ** (2 * (torch.div(dim_t, 2, rounding_mode="floor")) / num_feats) + x_embed = pos_tensor[:, :, 0] * scale + y_embed = pos_tensor[:, :, 1] * scale + pos_x = x_embed[:, :, None] / dim_t + pos_y = y_embed[:, :, None] / dim_t + pos_x = torch.stack( + (pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3 + ).flatten(2) + pos_y = torch.stack( + (pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3 + 
).flatten(2) + if pos_tensor.size(-1) == 2: + pos = torch.cat((pos_y, pos_x), dim=2) + elif pos_tensor.size(-1) == 4: + w_embed = pos_tensor[:, :, 2] * scale + pos_w = w_embed[:, :, None] / dim_t + pos_w = torch.stack( + (pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3 + ).flatten(2) + + h_embed = pos_tensor[:, :, 3] * scale + pos_h = h_embed[:, :, None] / dim_t + pos_h = torch.stack( + (pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3 + ).flatten(2) + + pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2) + else: + raise ValueError("Unknown pos_tensor shape(-1):{}".format(pos_tensor.size(-1))) + return pos + + +class SAM3Output(list): + """ + A class representing the output of a SAM3 model. + It provides an iterable interface that supports different iteration modes, including iterating over all steps per stage, + last step per stage, and flattened output. + Attributes: + output: The output of the SAM3 model, represented as a list of lists. + iter_mode: The current iteration mode. + Example: + >>> output = [[1, 2], [3, 4], [5, 6]] + >>> sam3_output = SAM3Output(output) + >>> for step in sam3_output: + ... print(step) + [1, 2] + [3, 4] + [5, 6] + >>> with SAM3Output.iteration_mode(SAM3Output.IterMode.LAST_STEP_PER_STAGE) as sam3_last_step_out: + ... for step in sam3_last_step_out: + ... print(step) + [2] + [4] + [6] + >>> with SAM3Output.iteration_mode(SAM3Output.IterMode.FLATTENED) as sam3_flattened_out: + ... for step in sam3_flattened_out: + ... print(step) + 1 + 2 + 3 + 4 + 5 + 6 + """ + + class IterMode(Enum): + # Defines the type of iterator over ouptuts. + ALL_STEPS_PER_STAGE = auto() + LAST_STEP_PER_STAGE = auto() + FLATTENED = auto() # Returns each interactivity step as if it is a separate stage (this is used in SAM3Image model) + + def __init__( + self, + output: List[List[Dict]] = None, + iter_mode: IterMode = IterMode.ALL_STEPS_PER_STAGE, + loss_stages: Optional[List[int]] = None, + ): + if output is not None: + assert ( + isinstance(output, list) + and len(output) > 0 + and isinstance(output[0], list) + ), "Expected output to be a list of lists" + self.output = output + else: + self.output = [] + assert isinstance( + iter_mode, SAM3Output.IterMode + ), f"iter_mode shoulf be of enum type 'SAM3Output.IterMode'. Got {type(iter_mode)}" + + self.iter_mode = iter_mode + # We create a weak reference to self to be used in the lambda functions. + # This is to avoid cyclic references and let SAM3Output be garabge collected. + self_ref = weakref.ref(self) + self._mode2iter = { + SAM3Output.IterMode.ALL_STEPS_PER_STAGE: lambda: iter(self_ref().output), + SAM3Output.IterMode.LAST_STEP_PER_STAGE: lambda: ( + inner_list[-1] for inner_list in self_ref().output + ), + SAM3Output.IterMode.FLATTENED: lambda: ( + element for inner_list in self_ref().output for element in inner_list + ), + } + self.loss_stages = loss_stages + + @override + def __iter__(self) -> Iterator: + return self._mode2iter[self.iter_mode]() + + def __getitem__(self, index): + """ + Returns the item at the specified index. + Args: + index (int): The index of the item to return. + Returns: + list or element: The item at the specified index. + """ + assert isinstance(index, int), f"index should be an integer. 
Got {type(index)}"
+        if self.iter_mode == SAM3Output.IterMode.ALL_STEPS_PER_STAGE:
+            return self.output[index]
+        elif self.iter_mode == SAM3Output.IterMode.LAST_STEP_PER_STAGE:
+            return self.output[index][-1]
+        elif self.iter_mode == SAM3Output.IterMode.FLATTENED:
+            if index == -1:
+                return self.output[-1][-1]
+            else:
+                flattened_output = sum(self.output, [])
+                return flattened_output[index]
+
+    class _IterationMode(AbstractContextManager):
+        """
+        A context manager that temporarily changes the iteration mode of a SAM3Output object.
+        This class is used internally by the SAM3Output.iteration_mode method.
+        """
+
+        def __init__(
+            self, model_output: "SAM3Output", iter_mode: "SAM3Output.IterMode"
+        ):
+            self._model_output = model_output
+            self._orig_iter_mode = model_output.iter_mode
+            self._new_iter_mode = iter_mode
+
+        @override
+        def __enter__(self) -> "SAM3Output":
+            self._model_output.iter_mode = self._new_iter_mode
+            return self._model_output
+
+        @override
+        def __exit__(self, exc_type, exc_value, traceback):
+            self._model_output.iter_mode = self._orig_iter_mode
+            return super().__exit__(exc_type, exc_value, traceback)
+
+    @staticmethod
+    def iteration_mode(
+        model_output: "SAM3Output", iter_mode: IterMode
+    ) -> _IterationMode:
+        """
+        Returns a context manager that allows you to temporarily change the iteration mode of the SAM3Output object.
+        Args:
+            model_output: The SAM3Output object.
+            iter_mode: The new iteration mode.
+        Returns:
+            SAM3Output._IterationMode: A context manager that changes the iteration mode of the SAM3Output object.
+        """
+        return SAM3Output._IterationMode(model_output=model_output, iter_mode=iter_mode)
+
+    def append(self, item: list):
+        assert isinstance(
+            item, list
+        ), f"Only list items are supported. Got {type(item)}"
+        self.output.append(item)
+
+    def __repr__(self):
+        return self.output.__repr__()
+
+    def __len__(self):
+        if self.iter_mode in [
+            SAM3Output.IterMode.ALL_STEPS_PER_STAGE,
+            SAM3Output.IterMode.LAST_STEP_PER_STAGE,
+        ]:
+            return len(self.output)
+        elif self.iter_mode == SAM3Output.IterMode.FLATTENED:
+            flattened_output = sum(self.output, [])
+            return len(flattened_output)
diff --git a/detect_tools/sam3/sam3/model/necks.py b/detect_tools/sam3/sam3/model/necks.py
new file mode 100644
index 0000000000000000000000000000000000000000..21bf9b850ca8629940d1f2ee262912dafc3a7d78
--- /dev/null
+++ b/detect_tools/sam3/sam3/model/necks.py
@@ -0,0 +1,125 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
All Rights Reserved + +"""Necks are the interface between a vision backbone and the rest of the detection model""" + +from copy import deepcopy +from typing import List, Optional, Tuple + +import torch + +import torch.nn as nn + + +class Sam3DualViTDetNeck(nn.Module): + def __init__( + self, + trunk: nn.Module, + position_encoding: nn.Module, + d_model: int, + scale_factors=(4.0, 2.0, 1.0, 0.5), + add_sam2_neck: bool = False, + ): + """ + SimpleFPN neck a la ViTDet + (From detectron2, very lightly adapted) + It supports a "dual neck" setting, where we have two identical necks (for SAM3 and SAM2), with different weights + + :param trunk: the backbone + :param position_encoding: the positional encoding to use + :param d_model: the dimension of the model + """ + super().__init__() + self.trunk = trunk + self.position_encoding = position_encoding + self.convs = nn.ModuleList() + + self.scale_factors = scale_factors + use_bias = True + dim: int = self.trunk.channel_list[-1] + + for _, scale in enumerate(scale_factors): + current = nn.Sequential() + + if scale == 4.0: + current.add_module( + "dconv_2x2_0", + nn.ConvTranspose2d(dim, dim // 2, kernel_size=2, stride=2), + ) + current.add_module( + "gelu", + nn.GELU(), + ) + current.add_module( + "dconv_2x2_1", + nn.ConvTranspose2d(dim // 2, dim // 4, kernel_size=2, stride=2), + ) + out_dim = dim // 4 + elif scale == 2.0: + current.add_module( + "dconv_2x2", + nn.ConvTranspose2d(dim, dim // 2, kernel_size=2, stride=2), + ) + out_dim = dim // 2 + elif scale == 1.0: + out_dim = dim + elif scale == 0.5: + current.add_module( + "maxpool_2x2", + nn.MaxPool2d(kernel_size=2, stride=2), + ) + out_dim = dim + else: + raise NotImplementedError(f"scale_factor={scale} is not supported yet.") + + current.add_module( + "conv_1x1", + nn.Conv2d( + in_channels=out_dim, + out_channels=d_model, + kernel_size=1, + bias=use_bias, + ), + ) + current.add_module( + "conv_3x3", + nn.Conv2d( + in_channels=d_model, + out_channels=d_model, + kernel_size=3, + padding=1, + bias=use_bias, + ), + ) + self.convs.append(current) + + self.sam2_convs = None + if add_sam2_neck: + # Assumes sam2 neck is just a clone of the original neck + self.sam2_convs = deepcopy(self.convs) + + def forward( + self, tensor_list: List[torch.Tensor] + ) -> Tuple[ + List[torch.Tensor], + List[torch.Tensor], + Optional[List[torch.Tensor]], + Optional[List[torch.Tensor]], + ]: + xs = self.trunk(tensor_list) + sam3_out, sam3_pos = [], [] + sam2_out, sam2_pos = None, None + if self.sam2_convs is not None: + sam2_out, sam2_pos = [], [] + x = xs[-1] # simpleFPN + for i in range(len(self.convs)): + sam3_x_out = self.convs[i](x) + sam3_pos_out = self.position_encoding(sam3_x_out).to(sam3_x_out.dtype) + sam3_out.append(sam3_x_out) + sam3_pos.append(sam3_pos_out) + + if self.sam2_convs is not None: + sam2_x_out = self.sam2_convs[i](x) + sam2_pos_out = self.position_encoding(sam2_x_out).to(sam2_x_out.dtype) + sam2_out.append(sam2_x_out) + sam2_pos.append(sam2_pos_out) + return sam3_out, sam3_pos, sam2_out, sam2_pos diff --git a/detect_tools/sam3/sam3/model/position_encoding.py b/detect_tools/sam3/sam3/model/position_encoding.py new file mode 100644 index 0000000000000000000000000000000000000000..eb3f4055a7bae489de46368d9b94cf9d33595370 --- /dev/null +++ b/detect_tools/sam3/sam3/model/position_encoding.py @@ -0,0 +1,124 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
All Rights Reserved + +import math +from typing import Optional + +import torch +from torch import nn + + +class PositionEmbeddingSine(nn.Module): + """ + This is a more standard version of the position embedding, very similar to the one + used by the Attention is all you need paper, generalized to work on images. + """ + + def __init__( + self, + num_pos_feats, + temperature: int = 10000, + normalize: bool = True, + scale: Optional[float] = None, + precompute_resolution: Optional[int] = None, + ): + super().__init__() + assert num_pos_feats % 2 == 0, "Expecting even model width" + self.num_pos_feats = num_pos_feats // 2 + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + self.scale = scale + + self.cache = {} + # Precompute positional encodings under `precompute_resolution` to fill the cache + # and avoid symbolic shape tracing errors in torch.compile in PyTorch 2.4 nightly. + if precompute_resolution is not None: + # We precompute pos enc for stride 4, 8, 16 and 32 to fill `self.cache`. + precompute_sizes = [ + (precompute_resolution // 4, precompute_resolution // 4), + (precompute_resolution // 8, precompute_resolution // 8), + (precompute_resolution // 16, precompute_resolution // 16), + (precompute_resolution // 32, precompute_resolution // 32), + ] + for size in precompute_sizes: + tensors = torch.zeros((1, 1) + size, device="cuda") + self.forward(tensors) + # further clone and detach it in the cache (just to be safe) + self.cache[size] = self.cache[size].clone().detach() + + def _encode_xy(self, x, y): + # The positions are expected to be normalized + assert len(x) == len(y) and x.ndim == y.ndim == 1 + x_embed = x * self.scale + y_embed = y * self.scale + + dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) + dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) + + pos_x = x_embed[:, None] / dim_t + pos_y = y_embed[:, None] / dim_t + pos_x = torch.stack( + (pos_x[:, 0::2].sin(), pos_x[:, 1::2].cos()), dim=2 + ).flatten(1) + pos_y = torch.stack( + (pos_y[:, 0::2].sin(), pos_y[:, 1::2].cos()), dim=2 + ).flatten(1) + return pos_x, pos_y + + @torch.no_grad() + def encode_boxes(self, x, y, w, h): + pos_x, pos_y = self._encode_xy(x, y) + pos = torch.cat((pos_y, pos_x, h[:, None], w[:, None]), dim=1) + return pos + + encode = encode_boxes # Backwards compatibility + + @torch.no_grad() + def encode_points(self, x, y, labels): + (bx, nx), (by, ny), (bl, nl) = x.shape, y.shape, labels.shape + assert bx == by and nx == ny and bx == bl and nx == nl + pos_x, pos_y = self._encode_xy(x.flatten(), y.flatten()) + pos_x, pos_y = pos_x.reshape(bx, nx, -1), pos_y.reshape(by, ny, -1) + pos = torch.cat((pos_y, pos_x, labels[:, :, None]), dim=2) + return pos + + @torch.no_grad() + def forward(self, x): + cache_key = None + cache_key = (x.shape[-2], x.shape[-1]) + if cache_key in self.cache: + return self.cache[cache_key][None].repeat(x.shape[0], 1, 1, 1) + y_embed = ( + torch.arange(1, x.shape[-2] + 1, dtype=torch.float32, device=x.device) + .view(1, -1, 1) + .repeat(x.shape[0], 1, x.shape[-1]) + ) + x_embed = ( + torch.arange(1, x.shape[-1] + 1, dtype=torch.float32, device=x.device) + .view(1, 1, -1) + .repeat(x.shape[0], x.shape[-2], 1) + ) + + if self.normalize: + eps = 1e-6 + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale 
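+        # The remaining steps interleave sin/cos over channel pairs: each of the
+        # `self.num_pos_feats` frequencies 1 / temperature^(2k / num_pos_feats) is applied to
+        # the (optionally normalized) x/y coordinates, and the y- and x-encodings are then
+        # concatenated channel-wise into a (B, C, H, W) positional map, where C equals the
+        # model width passed to the constructor.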
+ + dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) + dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack( + (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4 + ).flatten(3) + pos_y = torch.stack( + (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4 + ).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + if cache_key is not None: + self.cache[cache_key] = pos[0] + return pos diff --git a/detect_tools/sam3/sam3/model/sam1_task_predictor.py b/detect_tools/sam3/sam3/model/sam1_task_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..f5e49b1ebc8f342c57ba265c0023f906608a93a1 --- /dev/null +++ b/detect_tools/sam3/sam3/model/sam1_task_predictor.py @@ -0,0 +1,458 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import logging + +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +import torch.nn as nn +from PIL.Image import Image + +from sam3.model.sam3_tracker_base import Sam3TrackerBase +from sam3.model.utils.sam1_utils import SAM2Transforms + + +# Adapted from https://github.com/facebookresearch/sam2/blob/main/sam2/sam2_image_predictor.py +class SAM3InteractiveImagePredictor(nn.Module): + def __init__( + self, + sam_model: Sam3TrackerBase, + mask_threshold=0.0, + max_hole_area=256.0, + max_sprinkle_area=0.0, + **kwargs, + ) -> None: + """ + Uses SAM-3 to calculate the image embedding for an image, and then + allow repeated, efficient mask prediction given prompts. + + Arguments: + sam_model : The model to use for mask prediction. + mask_threshold (float): The threshold to use when converting mask logits + to binary masks. Masks are thresholded at 0 by default. + max_hole_area (int): If max_hole_area > 0, we fill small holes in up to + the maximum area of max_hole_area in low_res_masks. + max_sprinkle_area (int): If max_sprinkle_area > 0, we remove small sprinkles up to + the maximum area of max_sprinkle_area in low_res_masks. + """ + super().__init__() + self.model = sam_model + self._transforms = SAM2Transforms( + resolution=self.model.image_size, + mask_threshold=mask_threshold, + max_hole_area=max_hole_area, + max_sprinkle_area=max_sprinkle_area, + ) + + # Predictor state + self._is_image_set = False + self._features = None + self._orig_hw = None + # Whether the predictor is set for single image or a batch of images + self._is_batch = False + + # Predictor config + self.mask_threshold = mask_threshold + + # Spatial dim for backbone feature maps + self._bb_feat_sizes = [ + (288, 288), + (144, 144), + (72, 72), + ] + + @torch.no_grad() + def set_image( + self, + image: Union[np.ndarray, Image], + ) -> None: + """ + Calculates the image embeddings for the provided image, allowing + masks to be predicted with the 'predict' method. + + Arguments: + image (np.ndarray or PIL Image): The input image to embed in RGB format. The image should be in HWC format if np.ndarray, or WHC format if PIL Image + with pixel values in [0, 255]. + image_format (str): The color format of the image, in ['RGB', 'BGR']. 
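+
+        A minimal end-to-end sketch (the model object and image are placeholders):
+
+            import numpy as np
+            predictor = SAM3InteractiveImagePredictor(sam_model)
+            predictor.set_image(np.zeros((480, 640, 3), dtype=np.uint8))
+            masks, scores, low_res = predictor.predict(
+                point_coords=np.array([[320, 240]]),
+                point_labels=np.array([1]),
+            )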
+ """ + self.reset_predictor() + # Transform the image to the form expected by the model + if isinstance(image, np.ndarray): + logging.info("For numpy array image, we assume (HxWxC) format") + self._orig_hw = [image.shape[:2]] + elif isinstance(image, Image): + w, h = image.size + self._orig_hw = [(h, w)] + else: + raise NotImplementedError("Image format not supported") + + input_image = self._transforms(image) + input_image = input_image[None, ...].to(self.device) + + assert ( + len(input_image.shape) == 4 and input_image.shape[1] == 3 + ), f"input_image must be of size 1x3xHxW, got {input_image.shape}" + logging.info("Computing image embeddings for the provided image...") + backbone_out = self.model.forward_image(input_image) + ( + _, + vision_feats, + _, + _, + ) = self.model._prepare_backbone_features(backbone_out) + # Add no_mem_embed, which is added to the lowest rest feat. map during training on videos + vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed + + feats = [ + feat.permute(1, 2, 0).view(1, -1, *feat_size) + for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1]) + ][::-1] + self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]} + self._is_image_set = True + logging.info("Image embeddings computed.") + + @torch.no_grad() + def set_image_batch( + self, + image_list: List[Union[np.ndarray]], + ) -> None: + """ + Calculates the image embeddings for the provided image batch, allowing + masks to be predicted with the 'predict_batch' method. + + Arguments: + image_list (List[np.ndarray]): The input images to embed in RGB format. The image should be in HWC format if np.ndarray + with pixel values in [0, 255]. + """ + self.reset_predictor() + assert isinstance(image_list, list) + self._orig_hw = [] + for image in image_list: + assert isinstance( + image, np.ndarray + ), "Images are expected to be an np.ndarray in RGB format, and of shape HWC" + self._orig_hw.append(image.shape[:2]) + # Transform the image to the form expected by the model + img_batch = self._transforms.forward_batch(image_list) + img_batch = img_batch.to(self.device) + batch_size = img_batch.shape[0] + assert ( + len(img_batch.shape) == 4 and img_batch.shape[1] == 3 + ), f"img_batch must be of size Bx3xHxW, got {img_batch.shape}" + logging.info("Computing image embeddings for the provided images...") + backbone_out = self.model.forward_image(img_batch) + ( + _, + vision_feats, + _, + _, + ) = self.model._prepare_backbone_features(backbone_out) + # Add no_mem_embed, which is added to the lowest rest feat. map during training on videos + vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed + + feats = [ + feat.permute(1, 2, 0).view(batch_size, -1, *feat_size) + for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1]) + ][::-1] + self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]} + self._is_image_set = True + self._is_batch = True + logging.info("Image embeddings computed.") + + def predict_batch( + self, + point_coords_batch: List[np.ndarray] = None, + point_labels_batch: List[np.ndarray] = None, + box_batch: List[np.ndarray] = None, + mask_input_batch: List[np.ndarray] = None, + multimask_output: bool = True, + return_logits: bool = False, + normalize_coords=True, + ) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]: + """This function is very similar to predict(...), however it is used for batched mode, when the model is expected to generate predictions on multiple images. 
+ It returns a tuple of lists of masks, ious, and low_res_masks_logits. + """ + assert self._is_batch, "This function should only be used when in batched mode" + if not self._is_image_set: + raise RuntimeError( + "An image must be set with .set_image_batch(...) before mask prediction." + ) + num_images = len(self._features["image_embed"]) + all_masks = [] + all_ious = [] + all_low_res_masks = [] + for img_idx in range(num_images): + # Transform input prompts + point_coords = ( + point_coords_batch[img_idx] if point_coords_batch is not None else None + ) + point_labels = ( + point_labels_batch[img_idx] if point_labels_batch is not None else None + ) + box = box_batch[img_idx] if box_batch is not None else None + mask_input = ( + mask_input_batch[img_idx] if mask_input_batch is not None else None + ) + mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts( + point_coords, + point_labels, + box, + mask_input, + normalize_coords, + img_idx=img_idx, + ) + masks, iou_predictions, low_res_masks = self._predict( + unnorm_coords, + labels, + unnorm_box, + mask_input, + multimask_output, + return_logits=return_logits, + img_idx=img_idx, + ) + masks_np = masks.squeeze(0).float().detach().cpu().numpy() + iou_predictions_np = ( + iou_predictions.squeeze(0).float().detach().cpu().numpy() + ) + low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy() + all_masks.append(masks_np) + all_ious.append(iou_predictions_np) + all_low_res_masks.append(low_res_masks_np) + + return all_masks, all_ious, all_low_res_masks + + def predict( + self, + point_coords: Optional[np.ndarray] = None, + point_labels: Optional[np.ndarray] = None, + box: Optional[np.ndarray] = None, + mask_input: Optional[np.ndarray] = None, + multimask_output: bool = True, + return_logits: bool = False, + normalize_coords=True, + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + Predict masks for the given input prompts, using the currently set image. + + Arguments: + point_coords (np.ndarray or None): A Nx2 array of point prompts to the + model. Each point is in (X,Y) in pixels. + point_labels (np.ndarray or None): A length N array of labels for the + point prompts. 1 indicates a foreground point and 0 indicates a + background point. + box (np.ndarray or None): A length 4 array given a box prompt to the + model, in XYXY format. + mask_input (np.ndarray): A low resolution mask input to the model, typically + coming from a previous prediction iteration. Has form 1xHxW, where + for SAM, H=W=256. + multimask_output (bool): If true, the model will return three masks. + For ambiguous input prompts (such as a single click), this will often + produce better masks than a single prediction. If only a single + mask is needed, the model's predicted quality score can be used + to select the best mask. For non-ambiguous prompts, such as multiple + input prompts, multimask_output=False can give better results. + return_logits (bool): If true, returns un-thresholded masks logits + instead of a binary mask. + normalize_coords (bool): If true, the point coordinates will be normalized to the range [0,1] and point_coords is expected to be wrt. image dimensions. + + Returns: + (np.ndarray): The output masks in CxHxW format, where C is the + number of masks, and (H, W) is the original image size. + (np.ndarray): An array of length C containing the model's + predictions for the quality of each mask. + (np.ndarray): An array of shape CxHxW, where C is the number + of masks and H=W=256. 
These low resolution logits can be passed to + a subsequent iteration as mask input. + """ + if not self._is_image_set: + raise RuntimeError( + "An image must be set with .set_image(...) before mask prediction." + ) + + # Transform input prompts + + mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts( + point_coords, point_labels, box, mask_input, normalize_coords + ) + + masks, iou_predictions, low_res_masks = self._predict( + unnorm_coords, + labels, + unnorm_box, + mask_input, + multimask_output, + return_logits=return_logits, + ) + + masks_np = masks.squeeze(0).float().detach().cpu().numpy() + iou_predictions_np = iou_predictions.squeeze(0).float().detach().cpu().numpy() + low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy() + return masks_np, iou_predictions_np, low_res_masks_np + + def _prep_prompts( + self, point_coords, point_labels, box, mask_logits, normalize_coords, img_idx=-1 + ): + unnorm_coords, labels, unnorm_box, mask_input = None, None, None, None + if point_coords is not None: + assert ( + point_labels is not None + ), "point_labels must be supplied if point_coords is supplied." + point_coords = torch.as_tensor( + point_coords, dtype=torch.float, device=self.device + ) + unnorm_coords = self._transforms.transform_coords( + point_coords, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx] + ) + labels = torch.as_tensor(point_labels, dtype=torch.int, device=self.device) + if len(unnorm_coords.shape) == 2: + unnorm_coords, labels = unnorm_coords[None, ...], labels[None, ...] + if box is not None: + box = torch.as_tensor(box, dtype=torch.float, device=self.device) + unnorm_box = self._transforms.transform_boxes( + box, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx] + ) # Bx2x2 + if mask_logits is not None: + mask_input = torch.as_tensor( + mask_logits, dtype=torch.float, device=self.device + ) + if len(mask_input.shape) == 3: + mask_input = mask_input[None, :, :, :] + return mask_input, unnorm_coords, labels, unnorm_box + + @torch.no_grad() + def _predict( + self, + point_coords: Optional[torch.Tensor], + point_labels: Optional[torch.Tensor], + boxes: Optional[torch.Tensor] = None, + mask_input: Optional[torch.Tensor] = None, + multimask_output: bool = True, + return_logits: bool = False, + img_idx: int = -1, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Predict masks for the given input prompts, using the currently set image. + Input prompts are batched torch tensors and are expected to already be + transformed to the input frame using SAM2Transforms. + + Arguments: + point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the + model. Each point is in (X,Y) in pixels. + point_labels (torch.Tensor or None): A BxN array of labels for the + point prompts. 1 indicates a foreground point and 0 indicates a + background point. + boxes (np.ndarray or None): A Bx4 array given a box prompt to the + model, in XYXY format. + mask_input (np.ndarray): A low resolution mask input to the model, typically + coming from a previous prediction iteration. Has form Bx1xHxW, where + for SAM, H=W=256. Masks returned by a previous iteration of the + predict method do not need further transformation. + multimask_output (bool): If true, the model will return three masks. + For ambiguous input prompts (such as a single click), this will often + produce better masks than a single prediction. If only a single + mask is needed, the model's predicted quality score can be used + to select the best mask. 
For non-ambiguous prompts, such as multiple + input prompts, multimask_output=False can give better results. + return_logits (bool): If true, returns un-thresholded masks logits + instead of a binary mask. + + Returns: + (torch.Tensor): The output masks in BxCxHxW format, where C is the + number of masks, and (H, W) is the original image size. + (torch.Tensor): An array of shape BxC containing the model's + predictions for the quality of each mask. + (torch.Tensor): An array of shape BxCxHxW, where C is the number + of masks and H=W=256. These low res logits can be passed to + a subsequent iteration as mask input. + """ + if not self._is_image_set: + raise RuntimeError( + "An image must be set with .set_image(...) before mask prediction." + ) + + if point_coords is not None: + concat_points = (point_coords, point_labels) + else: + concat_points = None + + # Embed prompts + if boxes is not None: + box_coords = boxes.reshape(-1, 2, 2) + box_labels = torch.tensor([[2, 3]], dtype=torch.int, device=boxes.device) + box_labels = box_labels.repeat(boxes.size(0), 1) + # we merge "boxes" and "points" into a single "concat_points" input (where + # boxes are added at the beginning) to sam_prompt_encoder + if concat_points is not None: + concat_coords = torch.cat([box_coords, concat_points[0]], dim=1) + concat_labels = torch.cat([box_labels, concat_points[1]], dim=1) + concat_points = (concat_coords, concat_labels) + else: + concat_points = (box_coords, box_labels) + + sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder( + points=concat_points, + boxes=None, + masks=mask_input, + ) + + # Predict masks + batched_mode = ( + concat_points is not None and concat_points[0].shape[0] > 1 + ) # multi object prediction + high_res_features = [ + feat_level[img_idx].unsqueeze(0) + for feat_level in self._features["high_res_feats"] + ] + low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder( + image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0), + image_pe=self.model.sam_prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + repeat_image=batched_mode, + high_res_features=high_res_features, + ) + + # Upscale the masks to the original image resolution + masks = self._transforms.postprocess_masks( + low_res_masks, self._orig_hw[img_idx] + ) + low_res_masks = torch.clamp(low_res_masks, -32.0, 32.0) + if not return_logits: + masks = masks > self.mask_threshold + + return masks, iou_predictions, low_res_masks + + def get_image_embedding(self) -> torch.Tensor: + """ + Returns the image embeddings for the currently set image, with + shape 1xCxHxW, where C is the embedding dimension and (H,W) are + the embedding spatial dimension of SAM (typically C=256, H=W=64). + """ + if not self._is_image_set: + raise RuntimeError( + "An image must be set with .set_image(...) to generate an embedding." + ) + assert ( + self._features is not None + ), "Features must exist if an image has been set." + return self._features["image_embed"] + + @property + def device(self) -> torch.device: + return self.model.device + + def reset_predictor(self) -> None: + """ + Resets the image embeddings and other state variables. 
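# A minimal usage sketch of the single-image API documented above (set_image -> predict ->
# reset_predictor). The `predictor` instance and the click coordinates are placeholders;
# argument shapes and return values follow the `predict` docstring in this file.
import numpy as np

def segment_single_click(predictor, image_rgb: np.ndarray):
    predictor.set_image(image_rgb)                    # HxWx3 uint8 RGB array
    point_coords = np.array([[320.0, 240.0]])         # one (x, y) click in pixels
    point_labels = np.array([1])                      # 1 = foreground, 0 = background
    masks, ious, low_res_logits = predictor.predict(
        point_coords=point_coords,
        point_labels=point_labels,
        multimask_output=True,                        # 3 candidates for an ambiguous single click
    )
    best = int(ious.argmax())                         # keep the highest-quality candidate
    predictor.reset_predictor()
    return masks[best], float(ious[best]), low_res_logits[best]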
+ """ + self._is_image_set = False + self._features = None + self._orig_hw = None + self._is_batch = False diff --git a/detect_tools/sam3/sam3/model/sam3_image.py b/detect_tools/sam3/sam3/model/sam3_image.py new file mode 100644 index 0000000000000000000000000000000000000000..aafe520b9b923564cd8f048e4bfab6fef79417ae --- /dev/null +++ b/detect_tools/sam3/sam3/model/sam3_image.py @@ -0,0 +1,883 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import os +from copy import deepcopy +from typing import Dict, List, Optional, Tuple + +import numpy as np +import torch + +from sam3.model.model_misc import SAM3Output + +from sam3.model.sam1_task_predictor import SAM3InteractiveImagePredictor +from sam3.model.vl_combiner import SAM3VLBackbone +from sam3.perflib.nms import nms_masks + +from sam3.train.data.collator import BatchedDatapoint + +from .act_ckpt_utils import activation_ckpt_wrapper + +from .box_ops import box_cxcywh_to_xyxy + +from .geometry_encoders import Prompt +from .model_misc import inverse_sigmoid + + +def _update_out(out, out_name, out_value, auxiliary=True, update_aux=True): + out[out_name] = out_value[-1] if auxiliary else out_value + if auxiliary and update_aux: + if "aux_outputs" not in out: + out["aux_outputs"] = [{} for _ in range(len(out_value) - 1)] + assert len(out["aux_outputs"]) == len(out_value) - 1 + for aux_output, aux_value in zip(out["aux_outputs"], out_value[:-1]): + aux_output[out_name] = aux_value + + +class Sam3Image(torch.nn.Module): + TEXT_ID_FOR_TEXT = 0 + TEXT_ID_FOR_VISUAL = 1 + TEXT_ID_FOR_GEOMETRIC = 2 + + def __init__( + self, + backbone: SAM3VLBackbone, + transformer, + input_geometry_encoder, + segmentation_head=None, + num_feature_levels=1, + o2m_mask_predict=True, + dot_prod_scoring=None, + use_instance_query: bool = True, + multimask_output: bool = True, + use_act_checkpoint_seg_head: bool = True, + interactivity_in_encoder: bool = True, + matcher=None, + use_dot_prod_scoring=True, + supervise_joint_box_scores: bool = False, # only relevant if using presence token/score + detach_presence_in_joint_score: bool = False, # only relevant if using presence token/score + separate_scorer_for_instance: bool = False, + num_interactive_steps_val: int = 0, + inst_interactive_predictor: SAM3InteractiveImagePredictor = None, + **kwargs, + ): + super().__init__() + self.backbone = backbone + self.geometry_encoder = input_geometry_encoder + self.transformer = transformer + self.hidden_dim = transformer.d_model + self.num_feature_levels = num_feature_levels + self.segmentation_head = segmentation_head + + self.o2m_mask_predict = o2m_mask_predict + + self.dot_prod_scoring = dot_prod_scoring + self.use_act_checkpoint_seg_head = use_act_checkpoint_seg_head + self.interactivity_in_encoder = interactivity_in_encoder + self.matcher = matcher + + self.num_interactive_steps_val = num_interactive_steps_val + self.use_dot_prod_scoring = use_dot_prod_scoring + + if self.use_dot_prod_scoring: + assert dot_prod_scoring is not None + self.dot_prod_scoring = dot_prod_scoring + self.instance_dot_prod_scoring = None + if separate_scorer_for_instance: + self.instance_dot_prod_scoring = deepcopy(dot_prod_scoring) + else: + self.class_embed = torch.nn.Linear(self.hidden_dim, 1) + self.instance_class_embed = None + if separate_scorer_for_instance: + self.instance_class_embed = deepcopy(self.class_embed) + + self.supervise_joint_box_scores = supervise_joint_box_scores + self.detach_presence_in_joint_score = detach_presence_in_joint_score + + # verify the 
number of queries for O2O and O2M + num_o2o_static = self.transformer.decoder.num_queries + num_o2m_static = self.transformer.decoder.num_o2m_queries + assert num_o2m_static == (num_o2o_static if self.transformer.decoder.dac else 0) + self.dac = self.transformer.decoder.dac + + self.use_instance_query = use_instance_query + self.multimask_output = multimask_output + + self.inst_interactive_predictor = inst_interactive_predictor + + @property + def device(self): + self._device = getattr(self, "_device", None) or next(self.parameters()).device + return self._device + + def to(self, *args, **kwargs): + # clear cached _device in case the model is moved to a different device + self._device = None + return super().to(*args, **kwargs) + + def _get_img_feats(self, backbone_out, img_ids): + """Retrieve correct image features from backbone output.""" + if "backbone_fpn" in backbone_out: + if "id_mapping" in backbone_out and backbone_out["id_mapping"] is not None: + img_ids = backbone_out["id_mapping"][img_ids] + # If this assert fails, it likely means we're requesting different img_ids (perhaps a different frame?) + # We currently don't expect this to happen. We could technically trigger a recompute here, + # but likely at the cost of a cpu<->gpu sync point, which would deteriorate perf + torch._assert_async((img_ids >= 0).all()) + + vis_feats = backbone_out["backbone_fpn"][-self.num_feature_levels :] + vis_pos_enc = backbone_out["vision_pos_enc"][-self.num_feature_levels :] + vis_feat_sizes = [x.shape[-2:] for x in vis_pos_enc] # (H, W) shapes + # index and flatten visual features NxCxHxW => HWxNxC (batch-first => seq-first) + img_feats = [x[img_ids].flatten(2).permute(2, 0, 1) for x in vis_feats] + img_pos_embeds = [ + x[img_ids].flatten(2).permute(2, 0, 1) for x in vis_pos_enc + ] + return backbone_out, img_feats, img_pos_embeds, vis_feat_sizes + + # Image features not available in backbone output, so we compute them on the fly + # This case likely occurs for video. In that case, we want to forward only the current frame + img_batch = backbone_out["img_batch_all_stages"] + if img_ids.numel() > 1: + # Only forward backbone on unique image ids to avoid repetitive computation + unique_ids, _ = torch.unique(img_ids, return_inverse=True) + else: + unique_ids, _ = img_ids, slice(None) + # Compute the image features on those unique image ids + # note: we allow using a list (or other indexable types) of tensors as img_batch + # (e.g. for async frame loading in demo). 
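# Sketch of the id-deduplication pattern used by `_get_img_feats` above: the backbone is run
# only on the unique image ids of a request, and an `id_mapping` tensor translates original
# ids into rows of the cached features on later calls. `run_backbone` is a stand-in callable
# returning one feature row per unique id; real feature shapes are more complex.
import torch

def dedup_and_map(img_ids: torch.Tensor, num_images: int, run_backbone):
    unique_ids = torch.unique(img_ids)
    feats = run_backbone(unique_ids)                     # computed once per unique id
    id_mapping = torch.full((num_images,), -1, dtype=torch.long)
    id_mapping[unique_ids] = torch.arange(len(unique_ids))
    rows = id_mapping[img_ids]
    assert (rows >= 0).all()                             # -1 would mean an id we never embedded
    return feats[rows], id_mapping

feats, mapping = dedup_and_map(
    torch.tensor([3, 3, 7]), num_images=10, run_backbone=lambda ids: ids.float().unsqueeze(1)
)
print(feats.squeeze(1), mapping)  # tensor([3., 3., 7.]) and a mapping with rows 0/1 at ids 3/7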
In this case we index img_batch.tensors directly + if isinstance(img_batch, torch.Tensor): + image = img_batch[unique_ids] + elif unique_ids.numel() == 1: + image = img_batch[unique_ids.item()].unsqueeze(0) + else: + image = torch.stack([img_batch[i] for i in unique_ids.tolist()]) + # `img_batch` might be fp16 and offloaded to CPU + image = image.to(dtype=torch.float32, device=self.device) + # Next time we call this function, we want to remember which indices we computed + id_mapping = torch.full( + (len(img_batch),), -1, dtype=torch.long, device=self.device + ) + id_mapping[unique_ids] = torch.arange(len(unique_ids), device=self.device) + backbone_out = { + **backbone_out, + **self.backbone.forward_image(image), + "id_mapping": id_mapping, + } + assert "backbone_fpn" in backbone_out + return self._get_img_feats(backbone_out, img_ids=img_ids) + + def _encode_prompt( + self, + backbone_out, + find_input, + geometric_prompt, + visual_prompt_embed=None, + visual_prompt_mask=None, + encode_text=True, + prev_mask_pred=None, + ): + # index text features (note that regardless of early or late fusion, the batch size of + # `txt_feats` is always the number of *prompts* in the encoder) + txt_ids = find_input.text_ids + txt_feats = backbone_out["language_features"][:, txt_ids] + txt_masks = backbone_out["language_mask"][txt_ids] + + feat_tuple = self._get_img_feats(backbone_out, find_input.img_ids) + backbone_out, img_feats, img_pos_embeds, vis_feat_sizes = feat_tuple + + if prev_mask_pred is not None: + img_feats = [img_feats[-1] + prev_mask_pred] + # Encode geometry + geo_feats, geo_masks = self.geometry_encoder( + geo_prompt=geometric_prompt, + img_feats=img_feats, + img_sizes=vis_feat_sizes, + img_pos_embeds=img_pos_embeds, + ) + if visual_prompt_embed is None: + visual_prompt_embed = torch.zeros( + (0, *geo_feats.shape[1:]), device=geo_feats.device + ) + visual_prompt_mask = torch.zeros( + (*geo_masks.shape[:-1], 0), + device=geo_masks.device, + dtype=geo_masks.dtype, + ) + if encode_text: + prompt = torch.cat([txt_feats, geo_feats, visual_prompt_embed], dim=0) + prompt_mask = torch.cat([txt_masks, geo_masks, visual_prompt_mask], dim=1) + else: + prompt = torch.cat([geo_feats, visual_prompt_embed], dim=0) + prompt_mask = torch.cat([geo_masks, visual_prompt_mask], dim=1) + return prompt, prompt_mask, backbone_out + + def _run_encoder( + self, + backbone_out, + find_input, + prompt, + prompt_mask, + encoder_extra_kwargs: Optional[Dict] = None, + ): + feat_tuple = self._get_img_feats(backbone_out, find_input.img_ids) + backbone_out, img_feats, img_pos_embeds, vis_feat_sizes = feat_tuple + + # Run the encoder + prompt_pos_embed = torch.zeros_like(prompt) + # make a copy of the image feature lists since the encoder may modify these lists in-place + memory = self.transformer.encoder( + src=img_feats.copy(), + src_key_padding_mask=None, + src_pos=img_pos_embeds.copy(), + prompt=prompt, + prompt_pos=prompt_pos_embed, + prompt_key_padding_mask=prompt_mask, + feat_sizes=vis_feat_sizes, + encoder_extra_kwargs=encoder_extra_kwargs, + ) + encoder_out = { + # encoded image features + "encoder_hidden_states": memory["memory"], + "pos_embed": memory["pos_embed"], + "padding_mask": memory["padding_mask"], + "level_start_index": memory["level_start_index"], + "spatial_shapes": memory["spatial_shapes"], + "valid_ratios": memory["valid_ratios"], + "vis_feat_sizes": vis_feat_sizes, + # encoded text features (or other prompts) + "prompt_before_enc": prompt, + "prompt_after_enc": memory.get("memory_text", prompt), + 
"prompt_mask": prompt_mask, + } + return backbone_out, encoder_out, feat_tuple + + def _run_decoder( + self, + pos_embed, + memory, + src_mask, + out, + prompt, + prompt_mask, + encoder_out, + ): + bs = memory.shape[1] + query_embed = self.transformer.decoder.query_embed.weight + tgt = query_embed.unsqueeze(1).repeat(1, bs, 1) + + apply_dac = self.transformer.decoder.dac and self.training + hs, reference_boxes, dec_presence_out, dec_presence_feats = ( + self.transformer.decoder( + tgt=tgt, + memory=memory, + memory_key_padding_mask=src_mask, + pos=pos_embed, + reference_boxes=None, + level_start_index=encoder_out["level_start_index"], + spatial_shapes=encoder_out["spatial_shapes"], + valid_ratios=encoder_out["valid_ratios"], + tgt_mask=None, + memory_text=prompt, + text_attention_mask=prompt_mask, + apply_dac=apply_dac, + ) + ) + hs = hs.transpose(1, 2) # seq-first to batch-first + reference_boxes = reference_boxes.transpose(1, 2) # seq-first to batch-first + if dec_presence_out is not None: + # seq-first to batch-first + dec_presence_out = dec_presence_out.transpose(1, 2) + + out["presence_feats"] = dec_presence_feats + self._update_scores_and_boxes( + out, + hs, + reference_boxes, + prompt, + prompt_mask, + dec_presence_out=dec_presence_out, + ) + return out, hs + + def _update_scores_and_boxes( + self, + out, + hs, + reference_boxes, + prompt, + prompt_mask, + dec_presence_out=None, + is_instance_prompt=False, + ): + apply_dac = self.transformer.decoder.dac and self.training + num_o2o = (hs.size(2) // 2) if apply_dac else hs.size(2) + num_o2m = hs.size(2) - num_o2o + assert num_o2m == (num_o2o if apply_dac else 0) + out["queries"] = hs[-1][:, :num_o2o] # remove o2m queries if there are any + # score prediction + if self.use_dot_prod_scoring: + dot_prod_scoring_head = self.dot_prod_scoring + if is_instance_prompt and self.instance_dot_prod_scoring is not None: + dot_prod_scoring_head = self.instance_dot_prod_scoring + outputs_class = dot_prod_scoring_head(hs, prompt, prompt_mask) + else: + class_embed_head = self.class_embed + if is_instance_prompt and self.instance_class_embed is not None: + class_embed_head = self.instance_class_embed + outputs_class = class_embed_head(hs) + + # box prediction + box_head = self.transformer.decoder.bbox_embed + if ( + is_instance_prompt + and self.transformer.decoder.instance_bbox_embed is not None + ): + box_head = self.transformer.decoder.instance_bbox_embed + anchor_box_offsets = box_head(hs) + reference_boxes_inv_sig = inverse_sigmoid(reference_boxes) + outputs_coord = (reference_boxes_inv_sig + anchor_box_offsets).sigmoid() + outputs_boxes_xyxy = box_cxcywh_to_xyxy(outputs_coord) + + if dec_presence_out is not None: + _update_out( + out, "presence_logit_dec", dec_presence_out, update_aux=self.training + ) + + if self.supervise_joint_box_scores: + assert dec_presence_out is not None + prob_dec_presence_out = dec_presence_out.clone().sigmoid() + if self.detach_presence_in_joint_score: + prob_dec_presence_out = prob_dec_presence_out.detach() + + outputs_class = inverse_sigmoid( + outputs_class.sigmoid() * prob_dec_presence_out.unsqueeze(2) + ).clamp(min=-10.0, max=10.0) + + _update_out( + out, "pred_logits", outputs_class[:, :, :num_o2o], update_aux=self.training + ) + _update_out( + out, "pred_boxes", outputs_coord[:, :, :num_o2o], update_aux=self.training + ) + _update_out( + out, + "pred_boxes_xyxy", + outputs_boxes_xyxy[:, :, :num_o2o], + update_aux=self.training, + ) + if num_o2m > 0 and self.training: + _update_out( + out, + "pred_logits_o2m", + 
outputs_class[:, :, num_o2o:], + update_aux=self.training, + ) + _update_out( + out, + "pred_boxes_o2m", + outputs_coord[:, :, num_o2o:], + update_aux=self.training, + ) + _update_out( + out, + "pred_boxes_xyxy_o2m", + outputs_boxes_xyxy[:, :, num_o2o:], + update_aux=self.training, + ) + + def _run_segmentation_heads( + self, + out, + backbone_out, + img_ids, + vis_feat_sizes, + encoder_hidden_states, + prompt, + prompt_mask, + hs, + ): + apply_dac = self.transformer.decoder.dac and self.training + if self.segmentation_head is not None: + num_o2o = (hs.size(2) // 2) if apply_dac else hs.size(2) + num_o2m = hs.size(2) - num_o2o + obj_queries = hs if self.o2m_mask_predict else hs[:, :, :num_o2o] + seg_head_outputs = activation_ckpt_wrapper(self.segmentation_head)( + backbone_feats=backbone_out["backbone_fpn"], + obj_queries=obj_queries, + image_ids=img_ids, + encoder_hidden_states=encoder_hidden_states, + act_ckpt_enable=self.training and self.use_act_checkpoint_seg_head, + prompt=prompt, + prompt_mask=prompt_mask, + ) + aux_masks = False # self.aux_loss and self.segmentation_head.aux_masks + for k, v in seg_head_outputs.items(): + if k in self.segmentation_head.instance_keys: + _update_out(out, k, v[:, :num_o2o], auxiliary=aux_masks) + if ( + self.o2m_mask_predict and num_o2m > 0 + ): # handle o2m mask prediction + _update_out( + out, f"{k}_o2m", v[:, num_o2o:], auxiliary=aux_masks + ) + else: + out[k] = v + else: + backbone_out.pop("backbone_fpn", None) + + def _get_best_mask(self, out): + prev_mask_idx = out["pred_logits"].argmax(dim=1).squeeze(1) + batch_idx = torch.arange( + out["pred_logits"].shape[0], device=prev_mask_idx.device + ) + prev_mask_pred = out["pred_masks"][batch_idx, prev_mask_idx][:, None] + # Downsample mask to match image resolution. 
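# Worked example of the joint scoring in `_update_scores_and_boxes` above: with
# `supervise_joint_box_scores`, each query's class probability is multiplied by the presence
# probability and mapped back to logit space with a clamped inverse sigmoid. This standalone
# re-implementation is for illustration; the model uses its own `inverse_sigmoid` helper.
import torch

def inverse_sigmoid(p: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
    p = p.clamp(eps, 1 - eps)
    return torch.log(p / (1 - p))

query_logit = torch.tensor([2.0])        # sigmoid ~0.88: a fairly confident detection
presence_logit = torch.tensor([0.0])     # sigmoid 0.5: presence of the concept is uncertain
joint = inverse_sigmoid(query_logit.sigmoid() * presence_logit.sigmoid()).clamp(-10.0, 10.0)
print(joint)  # ~ -0.24: the confident query is pulled down by the uncertain presence score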
+ prev_mask_pred = self.geometry_encoder.mask_encoder.mask_downsampler( + prev_mask_pred + ) + prev_mask_pred = prev_mask_pred.flatten(-2).permute(2, 0, 1) + + return prev_mask_pred + + def forward_grounding( + self, + backbone_out, + find_input, + find_target, + geometric_prompt: Prompt, + ): + with torch.profiler.record_function("SAM3Image._encode_prompt"): + prompt, prompt_mask, backbone_out = self._encode_prompt( + backbone_out, find_input, geometric_prompt + ) + # Run the encoder + with torch.profiler.record_function("SAM3Image._run_encoder"): + backbone_out, encoder_out, _ = self._run_encoder( + backbone_out, find_input, prompt, prompt_mask + ) + out = { + "encoder_hidden_states": encoder_out["encoder_hidden_states"], + "prev_encoder_out": { + "encoder_out": encoder_out, + "backbone_out": backbone_out, + }, + } + + # Run the decoder + with torch.profiler.record_function("SAM3Image._run_decoder"): + out, hs = self._run_decoder( + memory=out["encoder_hidden_states"], + pos_embed=encoder_out["pos_embed"], + src_mask=encoder_out["padding_mask"], + out=out, + prompt=prompt, + prompt_mask=prompt_mask, + encoder_out=encoder_out, + ) + + # Run segmentation heads + with torch.profiler.record_function("SAM3Image._run_segmentation_heads"): + self._run_segmentation_heads( + out=out, + backbone_out=backbone_out, + img_ids=find_input.img_ids, + vis_feat_sizes=encoder_out["vis_feat_sizes"], + encoder_hidden_states=out["encoder_hidden_states"], + prompt=prompt, + prompt_mask=prompt_mask, + hs=hs, + ) + + if self.training or self.num_interactive_steps_val > 0: + self._compute_matching(out, self.back_convert(find_target)) + return out + + def _postprocess_out(self, out: Dict, multimask_output: bool = False): + # For multimask output, during eval we return the single best mask with the dict keys expected by the evaluators, but also return the multimasks output with new keys. 
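# Minimal sketch of the best-candidate selection used by `_get_best_mask` and
# `_postprocess_out` in this file: argmax the per-query logits, then gather the matching box
# and mask for each batch element. The tensors are random stand-ins with the same layout.
import torch

B, Q = 2, 3                                             # prompts x candidate queries
pred_logits = torch.randn(B, Q, 1)
pred_boxes = torch.rand(B, Q, 4)
best_idx = pred_logits.argmax(dim=1).squeeze(1)         # (B,) index of the top query per prompt
batch_idx = torch.arange(B)
best_boxes = pred_boxes[batch_idx, best_idx].unsqueeze(1)    # (B, 1, 4)
best_logits = pred_logits[batch_idx, best_idx].unsqueeze(1)  # (B, 1, 1)
print(best_boxes.shape, best_logits.shape)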
+ num_mask_boxes = out["pred_boxes"].size(1) + if not self.training and multimask_output and num_mask_boxes > 1: + out["multi_pred_logits"] = out["pred_logits"] + if "pred_masks" in out: + out["multi_pred_masks"] = out["pred_masks"] + out["multi_pred_boxes"] = out["pred_boxes"] + out["multi_pred_boxes_xyxy"] = out["pred_boxes_xyxy"] + + best_mask_idx = out["pred_logits"].argmax(1).squeeze(1) + batch_idx = torch.arange(len(best_mask_idx), device=best_mask_idx.device) + + out["pred_logits"] = out["pred_logits"][batch_idx, best_mask_idx].unsqueeze( + 1 + ) + if "pred_masks" in out: + out["pred_masks"] = out["pred_masks"][ + batch_idx, best_mask_idx + ].unsqueeze(1) + out["pred_boxes"] = out["pred_boxes"][batch_idx, best_mask_idx].unsqueeze(1) + out["pred_boxes_xyxy"] = out["pred_boxes_xyxy"][ + batch_idx, best_mask_idx + ].unsqueeze(1) + + return out + + def _get_dummy_prompt(self, num_prompts=1): + device = self.device + geometric_prompt = Prompt( + box_embeddings=torch.zeros(0, num_prompts, 4, device=device), + box_mask=torch.zeros(num_prompts, 0, device=device, dtype=torch.bool), + ) + return geometric_prompt + + def forward(self, input: BatchedDatapoint): + device = self.device + backbone_out = {"img_batch_all_stages": input.img_batch} + backbone_out.update(self.backbone.forward_image(input.img_batch)) + num_frames = len(input.find_inputs) + assert num_frames == 1 + + text_outputs = self.backbone.forward_text(input.find_text_batch, device=device) + backbone_out.update(text_outputs) + + previous_stages_out = SAM3Output( + iter_mode=SAM3Output.IterMode.LAST_STEP_PER_STAGE + ) + + find_input = input.find_inputs[0] + find_target = input.find_targets[0] + + if find_input.input_points is not None and find_input.input_points.numel() > 0: + print("Warning: Point prompts are ignored in PCS.") + + num_interactive_steps = 0 if self.training else self.num_interactive_steps_val + geometric_prompt = Prompt( + box_embeddings=find_input.input_boxes, + box_mask=find_input.input_boxes_mask, + box_labels=find_input.input_boxes_label, + ) + + # Init vars that are shared across the loop. 
+ stage_outs = [] + for cur_step in range(num_interactive_steps + 1): + if cur_step > 0: + # We sample interactive geometric prompts (boxes, points) + geometric_prompt, _ = self.interactive_prompt_sampler.sample( + geo_prompt=geometric_prompt, + find_target=find_target, + previous_out=stage_outs[-1], + ) + out = self.forward_grounding( + backbone_out=backbone_out, + find_input=find_input, + find_target=find_target, + geometric_prompt=geometric_prompt.clone(), + ) + stage_outs.append(out) + + previous_stages_out.append(stage_outs) + return previous_stages_out + + def _compute_matching(self, out, targets): + out["indices"] = self.matcher(out, targets) + for aux_out in out.get("aux_outputs", []): + aux_out["indices"] = self.matcher(aux_out, targets) + + def back_convert(self, targets): + batched_targets = { + "boxes": targets.boxes.view(-1, 4), + "boxes_xyxy": box_cxcywh_to_xyxy(targets.boxes.view(-1, 4)), + "boxes_padded": targets.boxes_padded, + "positive_map": targets.boxes.new_ones(len(targets.boxes), 1), + "num_boxes": targets.num_boxes, + "masks": targets.segments, + "semantic_masks": targets.semantic_segments, + "is_valid_mask": targets.is_valid_segment, + "is_exhaustive": targets.is_exhaustive, + "object_ids_packed": targets.object_ids, + "object_ids_padded": targets.object_ids_padded, + } + return batched_targets + + def predict_inst( + self, + inference_state, + **kwargs, + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + orig_h, orig_w = ( + inference_state["original_height"], + inference_state["original_width"], + ) + backbone_out = inference_state["backbone_out"]["sam2_backbone_out"] + ( + _, + vision_feats, + _, + _, + ) = self.inst_interactive_predictor.model._prepare_backbone_features( + backbone_out + ) + # Add no_mem_embed, which is added to the lowest rest feat. map during training on videos + vision_feats[-1] = ( + vision_feats[-1] + self.inst_interactive_predictor.model.no_mem_embed + ) + feats = [ + feat.permute(1, 2, 0).view(1, -1, *feat_size) + for feat, feat_size in zip( + vision_feats[::-1], self.inst_interactive_predictor._bb_feat_sizes[::-1] + ) + ][::-1] + self.inst_interactive_predictor._features = { + "image_embed": feats[-1], + "high_res_feats": feats[:-1], + } + self.inst_interactive_predictor._is_image_set = True + self.inst_interactive_predictor._orig_hw = [(orig_h, orig_w)] + res = self.inst_interactive_predictor.predict(**kwargs) + self.inst_interactive_predictor._features = None + self.inst_interactive_predictor._is_image_set = False + return res + + def predict_inst_batch( + self, + inference_state, + *args, + **kwargs, + ) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]: + backbone_out = inference_state["backbone_out"]["sam2_backbone_out"] + ( + _, + vision_feats, + _, + _, + ) = self.inst_interactive_predictor.model._prepare_backbone_features( + backbone_out + ) + # Add no_mem_embed, which is added to the lowest res feat. map during training on videos + vision_feats[-1] = ( + vision_feats[-1] + self.inst_interactive_predictor.model.no_mem_embed + ) + batch_size = vision_feats[-1].shape[1] + orig_heights, orig_widths = ( + inference_state["original_heights"], + inference_state["original_widths"], + ) + assert ( + batch_size == len(orig_heights) == len(orig_widths) + ), f"Batch size mismatch in predict_inst_batch. 
Got {batch_size}, {len(orig_heights)}, {len(orig_widths)}" + feats = [ + feat.permute(1, 2, 0).view(batch_size, -1, *feat_size) + for feat, feat_size in zip( + vision_feats[::-1], self.inst_interactive_predictor._bb_feat_sizes[::-1] + ) + ][::-1] + self.inst_interactive_predictor._features = { + "image_embed": feats[-1], + "high_res_feats": feats[:-1], + } + self.inst_interactive_predictor._is_image_set = True + self.inst_interactive_predictor._is_batch = True + self.inst_interactive_predictor._orig_hw = [ + (orig_h, orig_w) for orig_h, orig_w in zip(orig_heights, orig_widths) + ] + res = self.inst_interactive_predictor.predict_batch(*args, **kwargs) + self.inst_interactive_predictor._features = None + self.inst_interactive_predictor._is_image_set = False + self.inst_interactive_predictor._is_batch = False + return res + + +class Sam3ImageOnVideoMultiGPU(Sam3Image): + def __init__( + self, *args, async_all_gather=True, gather_backbone_out=None, **kwargs + ): + super().__init__(*args, **kwargs) + self.rank = int(os.getenv("RANK", "0")) + self.world_size = int(os.getenv("WORLD_SIZE", "1")) + self.async_all_gather = async_all_gather + + # if gather_backbone is not set, default to gathering only for `SAM3VLBackbone` + if gather_backbone_out is None: + gather_backbone_out = isinstance(self.backbone, SAM3VLBackbone) + self.gather_backbone_out = gather_backbone_out + + def forward_video_grounding_multigpu( + self, + backbone_out, + find_inputs, + geometric_prompt: Prompt, + frame_idx, + num_frames, + # `multigpu_buffer` is a dict to cache detector's outputs in a chunk between different calls + multigpu_buffer, + track_in_reverse=False, + # whether to also return the SAM2 backbone features + return_sam2_backbone_feats=False, + # whether to perform NMS and suppress the scores of those detections removed by NMS + run_nms=False, + nms_prob_thresh=None, + nms_iou_thresh=None, + **kwargs, + ): + """ + Compute the detector's detection outputs in a distributed manner, where all GPUs process + a chunk of frames (equal to the number of GPUs) at once and store them in cache. 
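# Worked example of the chunking scheme described in the docstring above: frames are grouped
# into chunks of `world_size` consecutive frames, each rank computes exactly one frame of the
# chunk (round-robin, clamped at the end of the video), and the results are all-gathered so
# every rank can serve the whole chunk from its buffer. Pure-Python illustration.
def chunk_bounds(frame_idx: int, world_size: int, num_frames: int):
    begin = frame_idx - frame_idx % world_size
    end = min(begin + world_size, num_frames)
    return begin, end

def frame_for_rank(begin: int, end: int, rank: int) -> int:
    return min(begin + rank, end - 1)   # trailing ranks repeat the last frame of a short chunk

# with 4 GPUs and a 10-frame video, frame 6 lives in chunk [4, 8) and rank 2 computes frame 6
assert chunk_bounds(6, 4, 10) == (4, 8)
assert frame_for_rank(4, 8, 2) == 6
# the final chunk [8, 10) is short, so ranks 2 and 3 both fall back to frame 9
assert [frame_for_rank(8, 10, r) for r in range(4)] == [8, 9, 9, 9]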
+ """ + # Step 1: fetch the detector outputs in the current chunk from buffer + frame_idx_curr_b = frame_idx - frame_idx % self.world_size + frame_idx_curr_e = min(frame_idx_curr_b + self.world_size, num_frames) + # in case the current frame's detection results are not in the buffer yet, build the current chunk + # (this should only happen on the first chunk, since we are also building the next chunk below) + if frame_idx not in multigpu_buffer: + with torch.profiler.record_function("build_multigpu_buffer_next_chunk1"): + self._build_multigpu_buffer_next_chunk( + backbone_out=backbone_out, + find_inputs=find_inputs, + geometric_prompt=geometric_prompt, + frame_idx_begin=frame_idx_curr_b, + frame_idx_end=frame_idx_curr_e, + num_frames=num_frames, + multigpu_buffer=multigpu_buffer, + run_nms=run_nms, + nms_prob_thresh=nms_prob_thresh, + nms_iou_thresh=nms_iou_thresh, + ) + + # read out the current frame's results from `multigpu_buffer` + out = {} + for k, (v, handle) in multigpu_buffer[frame_idx].items(): + if k.startswith("sam2_backbone_") and not return_sam2_backbone_feats: + continue + if handle is not None: + handle.wait() # wait for async all-gather to finish + out[k] = v + + # Step 2: remove detection outputs of the previous chunk from cache to save GPU memory + if not track_in_reverse and frame_idx_curr_b - self.world_size >= 0: + frame_idx_prev_e = frame_idx_curr_b + frame_idx_prev_b = frame_idx_curr_b - self.world_size + elif track_in_reverse and frame_idx_curr_e < num_frames: + frame_idx_prev_b = frame_idx_curr_e + frame_idx_prev_e = min(frame_idx_prev_b + self.world_size, num_frames) + else: + frame_idx_prev_b = frame_idx_prev_e = None + if frame_idx_prev_b is not None: + for frame_idx_rm in range(frame_idx_prev_b, frame_idx_prev_e): + multigpu_buffer.pop(frame_idx_rm, None) + + # Step 3: compute and cache detection outputs of the next chunk ahead of time + # (so that we can overlap computation with all-gather transfer) + if not track_in_reverse and frame_idx_curr_e < num_frames: + frame_idx_next_b = frame_idx_curr_e + frame_idx_next_e = min(frame_idx_next_b + self.world_size, num_frames) + elif track_in_reverse and frame_idx_curr_b - self.world_size >= 0: + frame_idx_next_e = frame_idx_curr_b + frame_idx_next_b = frame_idx_curr_b - self.world_size + else: + frame_idx_next_b = frame_idx_next_e = None + if frame_idx_next_b is not None and frame_idx_next_b not in multigpu_buffer: + with torch.profiler.record_function("build_multigpu_buffer_next_chunk2"): + self._build_multigpu_buffer_next_chunk( + backbone_out=backbone_out, + find_inputs=find_inputs, + geometric_prompt=geometric_prompt, + frame_idx_begin=frame_idx_next_b, + frame_idx_end=frame_idx_next_e, + num_frames=num_frames, + multigpu_buffer=multigpu_buffer, + run_nms=run_nms, + nms_prob_thresh=nms_prob_thresh, + nms_iou_thresh=nms_iou_thresh, + ) + + return out, backbone_out + + def _build_multigpu_buffer_next_chunk( + self, + backbone_out, + find_inputs, + geometric_prompt: Prompt, + frame_idx_begin, + frame_idx_end, + num_frames, + multigpu_buffer, + run_nms=False, + nms_prob_thresh=None, + nms_iou_thresh=None, + ): + """Compute detection outputs on a chunk of frames and store their results in multigpu_buffer.""" + # each GPU computes detections on one frame in the chunk (in a round-robin manner) + frame_idx_local_gpu = min(frame_idx_begin + self.rank, frame_idx_end - 1) + # `forward_grounding` (from base class `Sam3ImageOnVideo`) runs the detector on a single frame + with 
torch.profiler.record_function("forward_grounding"): + out_local = self.forward_grounding( + backbone_out=backbone_out, + find_input=find_inputs[frame_idx_local_gpu], + find_target=None, + geometric_prompt=geometric_prompt, + ) + if run_nms: + with torch.profiler.record_function("nms_masks"): + # run NMS as a post-processing step on top of the detection outputs + assert nms_prob_thresh is not None and nms_iou_thresh is not None + pred_probs = out_local["pred_logits"].squeeze(-1).sigmoid() + pred_masks = out_local["pred_masks"] + # loop over text prompts (not an overhead for demo where there's only 1 prompt) + for prompt_idx in range(pred_probs.size(0)): + keep = nms_masks( + pred_probs=pred_probs[prompt_idx], + pred_masks=pred_masks[prompt_idx], + prob_threshold=nms_prob_thresh, + iou_threshold=nms_iou_thresh, + ) + # set a very low threshold for those detections removed by NMS + out_local["pred_logits"][prompt_idx, :, 0] -= 1e4 * (~keep).float() + + if self.gather_backbone_out: + # gather the SAM 2 backbone features across GPUs + feats = out_local["prev_encoder_out"]["backbone_out"]["sam2_backbone_out"] + assert len(feats["backbone_fpn"]) == 3 # SAM2 backbone always have 3 levels + # cast the SAM2 backbone features to bfloat16 for all-gather (this is usually + # a no-op, SAM2 backbone features are likely already in bfloat16 due to AMP) + backbone_fpn_bf16 = [x.to(torch.bfloat16) for x in feats["backbone_fpn"]] + fpn0, fpn_handle0 = self._gather_tensor(backbone_fpn_bf16[0]) + fpn1, fpn_handle1 = self._gather_tensor(backbone_fpn_bf16[1]) + fpn2, fpn_handle2 = self._gather_tensor(backbone_fpn_bf16[2]) + # vision_pos_enc is the same on all frames, so no need to all-gather them + vision_pos_enc = feats["vision_pos_enc"] + + # trim the detector output to only include the necessary keys + out_local = { + "pred_logits": out_local["pred_logits"], + "pred_boxes": out_local["pred_boxes"], + "pred_boxes_xyxy": out_local["pred_boxes_xyxy"], + "pred_masks": out_local["pred_masks"], + } + + # gather the results: after this step, each GPU will receive detector outputs on + # all frames in the chunk and store them in `multigpu_buffer` + out_gathered = {k: self._gather_tensor(v) for k, v in out_local.items()} + for rank in range(self.world_size): + frame_idx_to_save = frame_idx_begin + rank + if frame_idx_to_save >= num_frames: + continue + frame_buffer = { + k: (v[rank], handle) for k, (v, handle) in out_gathered.items() + } + if self.gather_backbone_out: + # also add gathered SAM 2 backbone features to frame_buffer + frame_buffer["tracker_backbone_fpn_0"] = (fpn0[rank], fpn_handle0) + frame_buffer["tracker_backbone_fpn_1"] = (fpn1[rank], fpn_handle1) + frame_buffer["tracker_backbone_fpn_2"] = (fpn2[rank], fpn_handle2) + frame_buffer["tracker_backbone_pos_enc"] = (vision_pos_enc, None) + + multigpu_buffer[frame_idx_to_save] = frame_buffer + + def _gather_tensor(self, x): + if self.world_size == 1: + return [x], None + + async_op = self.async_all_gather + # here `.contiguous()` is required -- otherwise NCCL all_gather + # sometimes gives wrong results + x = x.contiguous() # ensure contiguous memory for NCCL + output_list = [torch.empty_like(x) for _ in range(self.world_size)] + handle = torch.distributed.all_gather(output_list, x, async_op=async_op) + return output_list, handle diff --git a/detect_tools/sam3/sam3/model/sam3_image_processor.py b/detect_tools/sam3/sam3/model/sam3_image_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..4d98fbfbbd3487f2d6f6047e9b9e1bc56190c9cf 
--- /dev/null +++ b/detect_tools/sam3/sam3/model/sam3_image_processor.py @@ -0,0 +1,222 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved +from typing import Dict, List + +import numpy as np +import PIL +import torch + +from sam3.model import box_ops + +from sam3.model.data_misc import FindStage, interpolate +from torchvision.transforms import v2 + + +class Sam3Processor: + """ """ + + def __init__(self, model, resolution=1008, device="cuda", confidence_threshold=0.5): + self.model = model + self.resolution = resolution + self.device = device + self.transform = v2.Compose( + [ + v2.ToDtype(torch.uint8, scale=True), + v2.Resize(size=(resolution, resolution)), + v2.ToDtype(torch.float32, scale=True), + v2.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), + ] + ) + self.confidence_threshold = confidence_threshold + + self.find_stage = FindStage( + img_ids=torch.tensor([0], device=device, dtype=torch.long), + text_ids=torch.tensor([0], device=device, dtype=torch.long), + input_boxes=None, + input_boxes_mask=None, + input_boxes_label=None, + input_points=None, + input_points_mask=None, + ) + + @torch.inference_mode() + def set_image(self, image, state=None): + """Sets the image on which we want to do predictions.""" + if state is None: + state = {} + + if isinstance(image, PIL.Image.Image): + width, height = image.size + elif isinstance(image, (torch.Tensor, np.ndarray)): + height, width = image.shape[-2:] + else: + raise ValueError("Image must be a PIL image or a tensor") + + image = v2.functional.to_image(image).to(self.device) + image = self.transform(image).unsqueeze(0) + + state["original_height"] = height + state["original_width"] = width + state["backbone_out"] = self.model.backbone.forward_image(image) + inst_interactivity_en = self.model.inst_interactive_predictor is not None + if inst_interactivity_en and "sam2_backbone_out" in state["backbone_out"]: + sam2_backbone_out = state["backbone_out"]["sam2_backbone_out"] + sam2_backbone_out["backbone_fpn"][0] = ( + self.model.inst_interactive_predictor.model.sam_mask_decoder.conv_s0( + sam2_backbone_out["backbone_fpn"][0] + ) + ) + sam2_backbone_out["backbone_fpn"][1] = ( + self.model.inst_interactive_predictor.model.sam_mask_decoder.conv_s1( + sam2_backbone_out["backbone_fpn"][1] + ) + ) + return state + + @torch.inference_mode() + def set_image_batch(self, images: List[np.ndarray], state=None): + """Sets the image batch on which we want to do predictions.""" + if state is None: + state = {} + + if not isinstance(images, list): + raise ValueError("Images must be a list of PIL images or tensors") + assert len(images) > 0, "Images list must not be empty" + assert isinstance( + images[0], PIL.Image.Image + ), "Images must be a list of PIL images" + + state["original_heights"] = [image.height for image in images] + state["original_widths"] = [image.width for image in images] + + images = [ + self.transform(v2.functional.to_image(image).to(self.device)) + for image in images + ] + images = torch.stack(images, dim=0) + state["backbone_out"] = self.model.backbone.forward_image(images) + inst_interactivity_en = self.model.inst_interactive_predictor is not None + if inst_interactivity_en and "sam2_backbone_out" in state["backbone_out"]: + sam2_backbone_out = state["backbone_out"]["sam2_backbone_out"] + sam2_backbone_out["backbone_fpn"][0] = ( + self.model.inst_interactive_predictor.model.sam_mask_decoder.conv_s0( + sam2_backbone_out["backbone_fpn"][0] + ) + ) + sam2_backbone_out["backbone_fpn"][1] = ( + 
self.model.inst_interactive_predictor.model.sam_mask_decoder.conv_s1( + sam2_backbone_out["backbone_fpn"][1] + ) + ) + return state + + @torch.inference_mode() + def set_text_prompt(self, prompt: str, state: Dict): + """Sets the text prompt and run the inference""" + + if "backbone_out" not in state: + raise ValueError("You must call set_image before set_text_prompt") + + text_outputs = self.model.backbone.forward_text([prompt], device=self.device) + # will erase the previous text prompt if any + state["backbone_out"].update(text_outputs) + if "geometric_prompt" not in state: + state["geometric_prompt"] = self.model._get_dummy_prompt() + + return self._forward_grounding(state) + + @torch.inference_mode() + def add_geometric_prompt(self, box: List, label: bool, state: Dict): + """Adds a box prompt and run the inference. + The image needs to be set, but not necessarily the text prompt. + The box is assumed to be in [center_x, center_y, width, height] format and normalized in [0, 1] range. + The label is True for a positive box, False for a negative box. + """ + if "backbone_out" not in state: + raise ValueError("You must call set_image before set_text_prompt") + + if "language_features" not in state["backbone_out"]: + # Looks like we don't have a text prompt yet. This is allowed, but we need to set the text prompt to "visual" for the model to rely only on the geometric prompt + dummy_text_outputs = self.model.backbone.forward_text( + ["visual"], device=self.device + ) + state["backbone_out"].update(dummy_text_outputs) + + if "geometric_prompt" not in state: + state["geometric_prompt"] = self.model._get_dummy_prompt() + + # adding a batch and sequence dimension + boxes = torch.tensor(box, device=self.device, dtype=torch.float32).view(1, 1, 4) + labels = torch.tensor([label], device=self.device, dtype=torch.bool).view(1, 1) + state["geometric_prompt"].append_boxes(boxes, labels) + + return self._forward_grounding(state) + + def reset_all_prompts(self, state: Dict): + """Removes all the prompts and results""" + if "backbone_out" in state: + backbone_keys_to_del = [ + "language_features", + "language_mask", + "language_embeds", + ] + for key in backbone_keys_to_del: + if key in state["backbone_out"]: + del state["backbone_out"][key] + + keys_to_del = ["geometric_prompt", "boxes", "masks", "masks_logits", "scores"] + for key in keys_to_del: + if key in state: + del state[key] + + @torch.inference_mode() + def set_confidence_threshold(self, threshold: float, state=None): + """Sets the confidence threshold for the masks""" + self.confidence_threshold = threshold + if state is not None and "boxes" in state: + # we need to filter the boxes again + # In principle we could do this more efficiently since we would only need + # to rerun the heads. 
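# A usage sketch for the Sam3Processor API defined in this file: set an image, add a text
# prompt, optionally tighten the confidence threshold, and read the filtered results from the
# returned state. `processor` is assumed to wrap a Sam3Image model loaded elsewhere in the repo.
from PIL import Image

def detect_with_text(processor, image_path: str, prompt: str, threshold: float = 0.6):
    state = processor.set_image(Image.open(image_path).convert("RGB"))
    state = processor.set_text_prompt(prompt, state)              # runs grounding with the text
    state = processor.set_confidence_threshold(threshold, state)  # re-filters existing outputs
    return state["boxes"], state["masks"], state["scores"]        # pixel XYXY boxes, binary masks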
But this is simpler and not too inefficient + return self._forward_grounding(state) + return state + + @torch.inference_mode() + def _forward_grounding(self, state: Dict): + outputs = self.model.forward_grounding( + backbone_out=state["backbone_out"], + find_input=self.find_stage, + geometric_prompt=state["geometric_prompt"], + find_target=None, + ) + + out_bbox = outputs["pred_boxes"] + out_logits = outputs["pred_logits"] + out_masks = outputs["pred_masks"] + out_probs = out_logits.sigmoid() + presence_score = outputs["presence_logit_dec"].sigmoid().unsqueeze(1) + out_probs = (out_probs * presence_score).squeeze(-1) + + keep = out_probs > self.confidence_threshold + out_probs = out_probs[keep] + out_masks = out_masks[keep] + out_bbox = out_bbox[keep] + + # convert to [x0, y0, x1, y1] format + boxes = box_ops.box_cxcywh_to_xyxy(out_bbox) + + img_h = state["original_height"] + img_w = state["original_width"] + scale_fct = torch.tensor([img_w, img_h, img_w, img_h]).to(self.device) + boxes = boxes * scale_fct[None, :] + + out_masks = interpolate( + out_masks.unsqueeze(1), + (img_h, img_w), + mode="bilinear", + align_corners=False, + ).sigmoid() + + state["masks_logits"] = out_masks + state["masks"] = out_masks > 0.5 + state["boxes"] = boxes + state["scores"] = out_probs + return state diff --git a/detect_tools/sam3/sam3/model/sam3_tracker_base.py b/detect_tools/sam3/sam3/model/sam3_tracker_base.py new file mode 100644 index 0000000000000000000000000000000000000000..1591f32591d96e3060668e2330bf79475e46b1e0 --- /dev/null +++ b/detect_tools/sam3/sam3/model/sam3_tracker_base.py @@ -0,0 +1,1188 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import logging + +import torch +import torch.nn.functional as F + +from sam3.model.memory import SimpleMaskEncoder + +from sam3.model.sam3_tracker_utils import get_1d_sine_pe, select_closest_cond_frames + +from sam3.sam.mask_decoder import MaskDecoder, MLP +from sam3.sam.prompt_encoder import PromptEncoder +from sam3.sam.transformer import TwoWayTransformer +from sam3.train.data.collator import BatchedDatapoint + +try: + from timm.layers import trunc_normal_ +except ModuleNotFoundError: + # compatibility for older timm versions + from timm.models.layers import trunc_normal_ + +# a large negative value as a placeholder score for missing objects +NO_OBJ_SCORE = -1024.0 + + +class Sam3TrackerBase(torch.nn.Module): + def __init__( + self, + backbone, + transformer, + maskmem_backbone, + num_maskmem=7, # default 1 input frame + 6 previous frames as in CAE + image_size=1008, + backbone_stride=14, # stride of the image backbone output + # The maximum number of conditioning frames to participate in the memory attention (-1 means no limit; if there are more conditioning frames than this limit, + # we only cross-attend to the temporally closest `max_cond_frames_in_attn` conditioning frames in the encoder when tracking each frame). This gives the model + # a temporal locality when handling a large number of annotated frames (since closer frames should be more important) and also avoids GPU OOM. 
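# Worked example of the box post-processing in `Sam3Processor._forward_grounding` earlier in
# this diff: predicted boxes are normalized (cx, cy, w, h) in [0, 1] and are converted to
# pixel-space (x0, y0, x1, y1) by the cxcywh->xyxy transform followed by scaling with
# [img_w, img_h, img_w, img_h]. Plain torch re-implementation for illustration only.
import torch

def cxcywh_to_xyxy_pixels(boxes: torch.Tensor, img_h: int, img_w: int) -> torch.Tensor:
    cx, cy, w, h = boxes.unbind(-1)
    xyxy = torch.stack([cx - 0.5 * w, cy - 0.5 * h, cx + 0.5 * w, cy + 0.5 * h], dim=-1)
    return xyxy * torch.tensor([img_w, img_h, img_w, img_h], dtype=xyxy.dtype)

# a box centered in a 1000x500 (WxH) image covering half of each dimension
print(cxcywh_to_xyxy_pixels(torch.tensor([[0.5, 0.5, 0.5, 0.5]]), img_h=500, img_w=1000))
# tensor([[250., 125., 750., 375.]])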
+ max_cond_frames_in_attn=-1, + # Whether to always keep the first conditioning frame in case we exceed the maximum number of conditioning frames allowed + keep_first_cond_frame=False, + # whether to output multiple (3) masks for the first click on initial conditioning frames + multimask_output_in_sam=False, + # the minimum and maximum number of clicks to use multimask_output_in_sam (only relevant when `multimask_output_in_sam=True`; + # default is 1 for both, meaning that only the first click gives multimask output; also note that a box counts as two points) + multimask_min_pt_num=1, + multimask_max_pt_num=1, + # whether to also use multimask output for tracking (not just for the first click on initial conditioning frames; only relevant when `multimask_output_in_sam=True`) + multimask_output_for_tracking=False, + # whether to forward image features per frame (as it's being tracked) during evaluation, instead of forwarding image features + # of all frames at once. This avoids backbone OOM errors on very long videos in evaluation, but could be slightly slower. + forward_backbone_per_frame_for_eval=False, + # The memory bank's temporal stride during evaluation (i.e. the `r` parameter in XMem and Cutie; XMem and Cutie use r=5). + # For r>1, the (self.num_maskmem - 1) non-conditioning memory frames consist of + # (self.num_maskmem - 2) nearest frames from every r-th frames, plus the last frame. + memory_temporal_stride_for_eval=1, + # whether to offload outputs to CPU memory during evaluation, to avoid GPU OOM on very long videos or very large resolutions or too many objects + # (it's recommended to use `forward_backbone_per_frame_for_eval=True` first before setting this option to True) + offload_output_to_cpu_for_eval=False, + # whether to trim the output of past non-conditioning frames (num_maskmem frames before the current frame) during evaluation + # (this helps save GPU or CPU memory on very long videos for semi-supervised VOS eval, where only the first frame receives prompts) + trim_past_non_cond_mem_for_eval=False, + # whether to apply non-overlapping constraints on the object masks in the memory encoder during evaluation (to avoid/alleviate superposing masks) + non_overlap_masks_for_mem_enc=False, + # the maximum number of object pointers from other frames in encoder cross attention + max_obj_ptrs_in_encoder=16, + # extra arguments used to construct the SAM mask decoder; if not None, it should be a dict of kwargs to be passed into `MaskDecoder` class. + sam_mask_decoder_extra_args=None, + # whether to compile all the model compoents + compile_all_components=False, + # select the frame with object existence + use_memory_selection=False, + # when using memory selection, the threshold to determine if the frame is good + mf_threshold=0.01, + ): + super().__init__() + + # Part 1: the image backbone + self.backbone = backbone + self.num_feature_levels = 3 + self.max_obj_ptrs_in_encoder = max_obj_ptrs_in_encoder + # A conv layer to downsample the GT mask prompt to stride 4 (the same stride as + # low-res SAM mask logits) and to change its scales from 0~1 to SAM logit scale, + # so that it can be fed into the SAM mask decoder to generate a pointer. 
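# Small illustration of the downsampling step described in the comment above (the 4x4,
# stride-4 conv defined just below): it shrinks a full-resolution mask prompt by 4x in each
# spatial dimension so it matches the stride of the low-res SAM mask logits. The conv here
# uses default-initialized weights; in the model the weights (and the 0~1 -> logit rescaling)
# are learned. The 1152 input size matches `input_mask_size` with the defaults in this file
# (image_size=1008, backbone_stride=14, so low_res_mask_size=288 and input_mask_size=1152).
import torch

mask_downsample = torch.nn.Conv2d(1, 1, kernel_size=4, stride=4)
gt_mask = torch.zeros(1, 1, 1152, 1152)
gt_mask[..., 300:700, 300:700] = 1.0
print(mask_downsample(gt_mask).shape)  # torch.Size([1, 1, 288, 288])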
+ self.mask_downsample = torch.nn.Conv2d(1, 1, kernel_size=4, stride=4) + + # Part 2: encoder-only transformer to fuse current frame's visual features + # with memories from past frames + assert transformer.decoder is None, "transformer should be encoder-only" + self.transformer = transformer + self.hidden_dim = transformer.d_model + + # Part 3: memory encoder for the previous frame's outputs + self.maskmem_backbone = maskmem_backbone + self.mem_dim = self.hidden_dim + if hasattr(self.maskmem_backbone, "out_proj") and hasattr( + self.maskmem_backbone.out_proj, "weight" + ): + # if there is compression of memories along channel dim + self.mem_dim = self.maskmem_backbone.out_proj.weight.shape[0] + self.num_maskmem = num_maskmem # Number of memories accessible + + # Temporal encoding of the memories + self.maskmem_tpos_enc = torch.nn.Parameter( + torch.zeros(num_maskmem, 1, 1, self.mem_dim) + ) + trunc_normal_(self.maskmem_tpos_enc, std=0.02) + + # a single token to indicate no memory embedding from previous frames + self.no_mem_embed = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim)) + self.no_mem_pos_enc = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim)) + trunc_normal_(self.no_mem_embed, std=0.02) + trunc_normal_(self.no_mem_pos_enc, std=0.02) + # Apply sigmoid to the output raw mask logits (to turn them from + # range (-inf, +inf) to range (0, 1)) before feeding them into the memory encoder + self.sigmoid_scale_for_mem_enc = 20.0 + self.sigmoid_bias_for_mem_enc = -10.0 + self.non_overlap_masks_for_mem_enc = non_overlap_masks_for_mem_enc + self.memory_temporal_stride_for_eval = memory_temporal_stride_for_eval + # On frames with mask input, whether to directly output the input mask without + # using a SAM prompt encoder + mask decoder + self.multimask_output_in_sam = multimask_output_in_sam + self.multimask_min_pt_num = multimask_min_pt_num + self.multimask_max_pt_num = multimask_max_pt_num + self.multimask_output_for_tracking = multimask_output_for_tracking + + # Part 4: SAM-style prompt encoder (for both mask and point inputs) + # and SAM-style mask decoder for the final mask output + self.image_size = image_size + self.backbone_stride = backbone_stride + self.low_res_mask_size = self.image_size // self.backbone_stride * 4 + # we resize the mask if it doesn't match `self.input_mask_size` (which is always 4x + # the low-res mask size, regardless of the actual input image size); this is because + # `_use_mask_as_output` always downsamples the input masks by 4x + self.input_mask_size = self.low_res_mask_size * 4 + self.forward_backbone_per_frame_for_eval = forward_backbone_per_frame_for_eval + self.offload_output_to_cpu_for_eval = offload_output_to_cpu_for_eval + self.trim_past_non_cond_mem_for_eval = trim_past_non_cond_mem_for_eval + self.sam_mask_decoder_extra_args = sam_mask_decoder_extra_args + self.no_obj_ptr = torch.nn.Parameter(torch.zeros(1, self.hidden_dim)) + trunc_normal_(self.no_obj_ptr, std=0.02) + self.no_obj_embed_spatial = torch.nn.Parameter(torch.zeros(1, self.mem_dim)) + trunc_normal_(self.no_obj_embed_spatial, std=0.02) + + self._build_sam_heads() + self.max_cond_frames_in_attn = max_cond_frames_in_attn + self.keep_first_cond_frame = keep_first_cond_frame + + # Use frame filtering according to SAM2Long + self.use_memory_selection = use_memory_selection + self.mf_threshold = mf_threshold + + # Compile all components of the model + self.compile_all_components = compile_all_components + if self.compile_all_components: + self._compile_all_components() + + @property 
+ def device(self): + return next(self.parameters()).device + + def _get_tpos_enc(self, rel_pos_list, device, max_abs_pos=None, dummy=False): + if dummy: + return torch.zeros(len(rel_pos_list), self.mem_dim, device=device) + + t_diff_max = max_abs_pos - 1 if max_abs_pos is not None else 1 + pos_enc = ( + torch.tensor(rel_pos_list).pin_memory().to(device=device, non_blocking=True) + / t_diff_max + ) + tpos_dim = self.hidden_dim + pos_enc = get_1d_sine_pe(pos_enc, dim=tpos_dim) + pos_enc = self.obj_ptr_tpos_proj(pos_enc) + + return pos_enc + + def _build_sam_heads(self): + """Build SAM-style prompt encoder and mask decoder.""" + self.sam_prompt_embed_dim = self.hidden_dim + self.sam_image_embedding_size = self.image_size // self.backbone_stride + + # build PromptEncoder and MaskDecoder from SAM + # (their hyperparameters like `mask_in_chans=16` are from SAM code) + self.sam_prompt_encoder = PromptEncoder( + embed_dim=self.sam_prompt_embed_dim, + image_embedding_size=( + self.sam_image_embedding_size, + self.sam_image_embedding_size, + ), + input_image_size=(self.image_size, self.image_size), + mask_in_chans=16, + ) + self.sam_mask_decoder = MaskDecoder( + num_multimask_outputs=3, + transformer=TwoWayTransformer( + depth=2, + embedding_dim=self.sam_prompt_embed_dim, + mlp_dim=2048, + num_heads=8, + ), + transformer_dim=self.sam_prompt_embed_dim, + iou_head_depth=3, + iou_head_hidden_dim=256, + use_high_res_features=True, + iou_prediction_use_sigmoid=True, + pred_obj_scores=True, + pred_obj_scores_mlp=True, + use_multimask_token_for_obj_ptr=True, + **(self.sam_mask_decoder_extra_args or {}), + ) + # a linear projection on SAM output tokens to turn them into object pointers + self.obj_ptr_proj = torch.nn.Linear(self.hidden_dim, self.hidden_dim) + self.obj_ptr_proj = MLP(self.hidden_dim, self.hidden_dim, self.hidden_dim, 3) + # a linear projection on temporal positional encoding in object pointers to + # avoid potential interference with spatial positional encoding + self.obj_ptr_tpos_proj = torch.nn.Linear(self.hidden_dim, self.mem_dim) + + def _forward_sam_heads( + self, + backbone_features, + point_inputs=None, + mask_inputs=None, + high_res_features=None, + multimask_output=False, + gt_masks=None, + ): + """ + Forward SAM prompt encoders and mask heads. + + Inputs: + - backbone_features: image features of [B, C, H, W] shape + - point_inputs: a dictionary with "point_coords" and "point_labels", where + 1) "point_coords" has [B, P, 2] shape and float32 dtype and contains the + absolute pixel-unit coordinate in (x, y) format of the P input points + 2) "point_labels" has shape [B, P] and int32 dtype, where 1 means + positive clicks, 0 means negative clicks, and -1 means padding + - mask_inputs: a mask of [B, 1, H*16, W*16] shape, float or bool, with the + same spatial size as the image. + - high_res_features: either 1) None or 2) or a list of length 2 containing + two feature maps of [B, C, 4*H, 4*W] and [B, C, 2*H, 2*W] shapes respectively, + which will be used as high-resolution feature maps for SAM decoder. + - multimask_output: if it's True, we output 3 candidate masks and their 3 + corresponding IoU estimates, and if it's False, we output only 1 mask and + its corresponding IoU estimate. + + Outputs: + - low_res_multimasks: [B, M, H*4, W*4] shape (where M = 3 if + `multimask_output=True` and M = 1 if `multimask_output=False`), the SAM + output mask logits (before sigmoid) for the low-resolution masks, with 4x + the resolution (1/4 stride) of the input backbone_features. 
+        - high_res_multimasks: [B, M, H*16, W*16] shape (where M = 3
+          if `multimask_output=True` and M = 1 if `multimask_output=False`),
+          upsampled from the low-resolution masks, with the same size as the image
+          (stride is 1 pixel).
+        - ious: [B, M] shape (where M = 3 if `multimask_output=True` and M = 1
+          if `multimask_output=False`), the estimated IoU of each output mask.
+        - low_res_masks: [B, 1, H*4, W*4] shape, the best mask in `low_res_multimasks`.
+          If `multimask_output=True`, it's the mask with the highest IoU estimate.
+          If `multimask_output=False`, it's the same as `low_res_multimasks`.
+        - high_res_masks: [B, 1, H*16, W*16] shape, the best mask in `high_res_multimasks`.
+          If `multimask_output=True`, it's the mask with the highest IoU estimate.
+          If `multimask_output=False`, it's the same as `high_res_multimasks`.
+        - obj_ptr: [B, C] shape, the object pointer vector for the output mask, extracted
+          based on the output token from the SAM mask decoder.
+        """
+        B = backbone_features.size(0)
+        device = backbone_features.device
+        assert backbone_features.size(1) == self.sam_prompt_embed_dim
+        assert backbone_features.size(2) == self.sam_image_embedding_size
+        assert backbone_features.size(3) == self.sam_image_embedding_size
+
+        # a) Handle point prompts
+        if point_inputs is not None:
+            sam_point_coords = point_inputs["point_coords"]
+            sam_point_labels = point_inputs["point_labels"]
+            assert sam_point_coords.size(0) == B and sam_point_labels.size(0) == B
+        else:
+            # If no points are provided, pad with an empty point (with label -1)
+            sam_point_coords = torch.zeros(B, 1, 2, device=device)
+            sam_point_labels = -torch.ones(B, 1, dtype=torch.int32, device=device)
+
+        # b) Handle mask prompts
+        if mask_inputs is not None:
+            # If mask_inputs is provided, downsize it into low-res mask input if needed
+            # and feed it as a dense mask prompt into the SAM mask encoder
+            assert len(mask_inputs.shape) == 4 and mask_inputs.shape[:2] == (B, 1)
+            if mask_inputs.shape[-2:] != self.sam_prompt_encoder.mask_input_size:
+                sam_mask_prompt = F.interpolate(
+                    mask_inputs.float(),
+                    size=self.sam_prompt_encoder.mask_input_size,
+                    align_corners=False,
+                    mode="bilinear",
+                    antialias=True,  # use antialias for downsampling
+                )
+            else:
+                sam_mask_prompt = mask_inputs
+        else:
+            # Otherwise, simply feed None (and SAM's prompt encoder will add
+            # a learned `no_mask_embed` to indicate no mask input in this case).
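
The branch above always gives the SAM prompt encoder something well-formed: missing clicks become a single padding point with label -1, and an over-sized mask prompt is resized down to the encoder's low-resolution input size. A standalone sketch of just that normalization step; the 256x256 `mask_input_size` is an assumed value for illustration, not read from this file.

```python
import torch
import torch.nn.functional as F

def normalize_prompts(point_inputs, mask_inputs, batch_size, mask_input_size=(256, 256)):
    """Minimal sketch of the prompt normalization done before the SAM prompt encoder."""
    if point_inputs is not None:
        coords = point_inputs["point_coords"]   # [B, P, 2], absolute (x, y) pixel coordinates
        labels = point_inputs["point_labels"]   # [B, P], 1=positive, 0=negative, -1=padding
    else:
        # no clicks: pad with a single dummy point labelled -1 so the encoder still runs
        coords = torch.zeros(batch_size, 1, 2)
        labels = -torch.ones(batch_size, 1, dtype=torch.int32)

    if mask_inputs is not None and mask_inputs.shape[-2:] != mask_input_size:
        # resize a full-resolution mask prompt down to the encoder's low-res input size
        mask_prompt = F.interpolate(
            mask_inputs.float(), size=mask_input_size,
            mode="bilinear", align_corners=False, antialias=True,
        )
    else:
        mask_prompt = mask_inputs
    return (coords, labels), mask_prompt

# e.g. two positive clicks and no mask prompt for a batch of one image
pts = {"point_coords": torch.tensor([[[100.0, 200.0], [150.0, 220.0]]]),
       "point_labels": torch.tensor([[1, 1]], dtype=torch.int32)}
(coords, labels), mask_prompt = normalize_prompts(pts, None, batch_size=1)
print(coords.shape, labels.shape, mask_prompt)  # torch.Size([1, 2, 2]) torch.Size([1, 2]) None
```
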
+ sam_mask_prompt = None + + sparse_embeddings, dense_embeddings = self.sam_prompt_encoder( + points=(sam_point_coords, sam_point_labels), + boxes=None, + masks=sam_mask_prompt, + ) + # Clone image_pe and the outputs of sam_prompt_encoder + # to enable compilation + sparse_embeddings = self._maybe_clone(sparse_embeddings) + dense_embeddings = self._maybe_clone(dense_embeddings) + image_pe = self._maybe_clone(self.sam_prompt_encoder.get_dense_pe()) + with torch.profiler.record_function("sam_mask_decoder"): + ( + low_res_multimasks, + ious, + sam_output_tokens, + object_score_logits, + ) = self.sam_mask_decoder( + image_embeddings=backbone_features, + image_pe=image_pe, + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + repeat_image=False, # the image is already batched + high_res_features=high_res_features, + ) + # Clone the output of sam_mask_decoder + # to enable compilation + low_res_multimasks = self._maybe_clone(low_res_multimasks) + ious = self._maybe_clone(ious) + sam_output_tokens = self._maybe_clone(sam_output_tokens) + object_score_logits = self._maybe_clone(object_score_logits) + + if self.training and self.teacher_force_obj_scores_for_mem: + # we use gt to detect if there is an object or not to + # select no obj ptr and use an empty mask for spatial memory + is_obj_appearing = torch.any(gt_masks.float().flatten(1) > 0, dim=1) + is_obj_appearing = is_obj_appearing[..., None] + else: + is_obj_appearing = object_score_logits > 0 + + # Mask used for spatial memories is always a *hard* choice between obj and no obj, + # consistent with the actual mask prediction + low_res_multimasks = torch.where( + is_obj_appearing[:, None, None], + low_res_multimasks, + NO_OBJ_SCORE, + ) + + # convert masks from possibly bfloat16 (or float16) to float32 + # (older PyTorch versions before 2.1 don't support `interpolate` on bf16) + low_res_multimasks = low_res_multimasks.float() + high_res_multimasks = F.interpolate( + low_res_multimasks, + size=(self.image_size, self.image_size), + mode="bilinear", + align_corners=False, + ) + + sam_output_token = sam_output_tokens[:, 0] + if multimask_output: + # take the best mask prediction (with the highest IoU estimation) + best_iou_inds = torch.argmax(ious, dim=-1) + batch_inds = torch.arange(B, device=device) + low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + if sam_output_tokens.size(1) > 1: + sam_output_token = sam_output_tokens[batch_inds, best_iou_inds] + else: + low_res_masks, high_res_masks = low_res_multimasks, high_res_multimasks + + # Extract object pointer from the SAM output token (with occlusion handling) + obj_ptr = self.obj_ptr_proj(sam_output_token) + lambda_is_obj_appearing = is_obj_appearing.float() + + obj_ptr = lambda_is_obj_appearing * obj_ptr + obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr + + return ( + low_res_multimasks, + high_res_multimasks, + ious, + low_res_masks, + high_res_masks, + obj_ptr, + object_score_logits, + ) + + def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs): + """ + Directly turn binary `mask_inputs` into a output mask logits without using SAM. + (same input and output shapes as in _forward_sam_heads above). + """ + # Use -10/+10 as logits for neg/pos pixels (very close to 0/1 in prob after sigmoid). 
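
`_use_mask_as_output`, which begins here, fakes decoder logits from a binary mask by mapping 0/1 pixels to -10/+10 via the `out_scale`/`out_bias` affine defined right below. A quick numeric check of why +/-10 is effectively a hard 0/1 probability after the sigmoid:

```python
import torch

mask = torch.tensor([[0.0, 1.0]])          # one background and one foreground pixel
out_scale, out_bias = 20.0, -10.0
logits = mask * out_scale + out_bias       # background -> -10, foreground -> +10
probs = torch.sigmoid(logits)
print(logits)  # tensor([[-10.,  10.]])
print(probs)   # tensor([[4.5398e-05, 9.9995e-01]]) -- effectively 0 and 1
```
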
+ out_scale, out_bias = 20.0, -10.0 # sigmoid(-10.0)=4.5398e-05 + mask_inputs_float = mask_inputs.float() + high_res_masks = mask_inputs_float * out_scale + out_bias + low_res_masks = F.interpolate( + high_res_masks, + size=( + high_res_masks.size(-2) // self.backbone_stride * 4, + high_res_masks.size(-1) // self.backbone_stride * 4, + ), + align_corners=False, + mode="bilinear", + antialias=True, # use antialias for downsampling + ) + # a dummy IoU prediction of all 1's under mask input + ious = mask_inputs.new_ones(mask_inputs.size(0), 1).float() + # produce an object pointer using the SAM decoder from the mask input + _, _, _, _, _, obj_ptr, _ = self._forward_sam_heads( + backbone_features=backbone_features, + mask_inputs=self.mask_downsample(mask_inputs_float), + high_res_features=high_res_features, + gt_masks=mask_inputs, + ) + # In this method, we are treating mask_input as output, e.g. using it directly to create spatial mem; + # Below, we follow the same design axiom to use mask_input to decide if obj appears or not instead of relying + # on the object_scores from the SAM decoder. + is_obj_appearing = torch.any(mask_inputs.flatten(1).float() > 0.0, dim=1) + is_obj_appearing = is_obj_appearing[..., None] + lambda_is_obj_appearing = is_obj_appearing.float() + object_score_logits = out_scale * lambda_is_obj_appearing + out_bias + obj_ptr = lambda_is_obj_appearing * obj_ptr + obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr + + return ( + low_res_masks, + high_res_masks, + ious, + low_res_masks, + high_res_masks, + obj_ptr, + object_score_logits, + ) + + def forward(self, input: BatchedDatapoint, is_inference=False): + raise NotImplementedError( + "Please use the corresponding methods in SAM3VideoPredictor for inference." + "See examples/sam3_dense_video_tracking.ipynb for an inference example." + ) + + def forward_image(self, img_batch): + """Get the image feature on the input batch.""" + # This line is the only change from the parent class + # to use the SAM3 backbone instead of the SAM2 backbone. 
+ backbone_out = self.backbone.forward_image(img_batch)["sam2_backbone_out"] + # precompute projected level 0 and level 1 features in SAM decoder + # to avoid running it again on every SAM click + backbone_out["backbone_fpn"][0] = self.sam_mask_decoder.conv_s0( + backbone_out["backbone_fpn"][0] + ) + backbone_out["backbone_fpn"][1] = self.sam_mask_decoder.conv_s1( + backbone_out["backbone_fpn"][1] + ) + # Clone to help torch.compile + for i in range(len(backbone_out["backbone_fpn"])): + backbone_out["backbone_fpn"][i] = self._maybe_clone( + backbone_out["backbone_fpn"][i] + ) + backbone_out["vision_pos_enc"][i] = self._maybe_clone( + backbone_out["vision_pos_enc"][i] + ) + return backbone_out + + def _prepare_backbone_features(self, backbone_out): + """Prepare and flatten visual features (same as in MDETR_API model).""" + backbone_out = backbone_out.copy() + assert len(backbone_out["backbone_fpn"]) == len(backbone_out["vision_pos_enc"]) + assert len(backbone_out["backbone_fpn"]) >= self.num_feature_levels + + feature_maps = backbone_out["backbone_fpn"][-self.num_feature_levels :] + vision_pos_embeds = backbone_out["vision_pos_enc"][-self.num_feature_levels :] + + feat_sizes = [(x.shape[-2], x.shape[-1]) for x in vision_pos_embeds] + # flatten NxCxHxW to HWxNxC + vision_feats = [x.flatten(2).permute(2, 0, 1) for x in feature_maps] + vision_pos_embeds = [x.flatten(2).permute(2, 0, 1) for x in vision_pos_embeds] + + return backbone_out, vision_feats, vision_pos_embeds, feat_sizes + + def _prepare_backbone_features_per_frame(self, img_batch, img_ids): + """Compute the image backbone features on the fly for the given img_ids.""" + # Only forward backbone on unique image ids to avoid repeatitive computation + # (if `img_ids` has only one element, it's already unique so we skip this step). + if img_ids.numel() > 1: + unique_img_ids, inv_ids = torch.unique(img_ids, return_inverse=True) + else: + unique_img_ids, inv_ids = img_ids, None + + # Compute the image features on those unique image ids + image = img_batch[unique_img_ids] + backbone_out = self.forward_image(image) + ( + _, + vision_feats, + vision_pos_embeds, + feat_sizes, + ) = self._prepare_backbone_features(backbone_out) + # Inverse-map image features for `unique_img_ids` to the final image features + # for the original input `img_ids`. 
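
`_prepare_backbone_features` keeps each FPN level as a flattened `(H*W, B, C)` token sequence, and the tracking code later folds the top level back to `(B, C, H, W)` with `permute(1, 2, 0).view(...)`. A tiny round-trip check of those two reshapes (the sizes are arbitrary):

```python
import torch

B, C, H, W = 2, 256, 32, 32
feat = torch.randn(B, C, H, W)

# BxCxHxW -> (HW)xBxC, as done when preparing the backbone features
seq = feat.flatten(2).permute(2, 0, 1)
print(seq.shape)                    # torch.Size([1024, 2, 256])

# (HW)xBxC -> BxCxHxW, as done before feeding the SAM heads
restored = seq.permute(1, 2, 0).view(B, C, H, W)
print(torch.equal(restored, feat))  # True
```
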
+ if inv_ids is not None: + image = image[inv_ids] + vision_feats = [x[:, inv_ids] for x in vision_feats] + vision_pos_embeds = [x[:, inv_ids] for x in vision_pos_embeds] + + return image, vision_feats, vision_pos_embeds, feat_sizes + + def cal_mem_score(self, object_score_logits, iou_score): + object_score_norm = torch.where( + object_score_logits > 0, + object_score_logits.sigmoid() * 2 - 1, ## rescale to [0, 1] + torch.zeros_like(object_score_logits), + ) + score_per_frame = (object_score_norm * iou_score).mean() + return score_per_frame + + def frame_filter(self, output_dict, track_in_reverse, frame_idx, num_frames, r): + if (frame_idx == 0 and not track_in_reverse) or ( + frame_idx == num_frames - 1 and track_in_reverse + ): + return [] + + max_num = min( + num_frames, self.max_obj_ptrs_in_encoder + ) ## maximum number of pointer memory frames to consider + + if not track_in_reverse: + start = frame_idx - 1 + end = 0 + step = -r + must_include = frame_idx - 1 + else: + start = frame_idx + 1 + end = num_frames + step = r + must_include = frame_idx + 1 + + valid_indices = [] + for i in range(start, end, step): + if ( + i not in output_dict["non_cond_frame_outputs"] + or "eff_iou_score" not in output_dict["non_cond_frame_outputs"][i] + ): + continue + + score_per_frame = output_dict["non_cond_frame_outputs"][i]["eff_iou_score"] + + if score_per_frame > self.mf_threshold: # threshold + valid_indices.insert(0, i) + + if len(valid_indices) >= max_num - 1: + break + + if must_include not in valid_indices: + valid_indices.append(must_include) + + return valid_indices + + def _prepare_memory_conditioned_features( + self, + frame_idx, + is_init_cond_frame, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + output_dict, + num_frames, + track_in_reverse=False, # tracking in reverse time order (for demo usage) + use_prev_mem_frame=True, + ): + """Fuse the current frame's visual feature map with previous memory.""" + B = current_vision_feats[-1].size(1) # batch size on this frame + C = self.hidden_dim + H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size + device = current_vision_feats[-1].device + # The case of `self.num_maskmem == 0` below is primarily used for reproducing SAM on images. + # In this case, we skip the fusion with any memory. 
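
`cal_mem_score` above folds the object-presence logit and the predicted IoU into one per-frame quality score, and `frame_filter` keeps only past frames whose score clears `mf_threshold` (the SAM2Long-style memory selection). A worked example with one confident and one occluded frame; the 0.3 threshold is an illustrative value, since the real one is a constructor argument:

```python
import torch

def cal_mem_score(object_score_logits, iou_score):
    object_score_norm = torch.where(
        object_score_logits > 0,
        object_score_logits.sigmoid() * 2 - 1,   # maps sigmoid output (0.5, 1) onto (0, 1)
        torch.zeros_like(object_score_logits),
    )
    return (object_score_norm * iou_score).mean()

# confident frame: strong object-presence logit and high predicted IoU
print(cal_mem_score(torch.tensor([4.0]), torch.tensor([0.9])))   # ~0.868
# occluded frame: a negative logit zeroes the score regardless of IoU
print(cal_mem_score(torch.tensor([-2.0]), torch.tensor([0.9])))  # 0.0

mf_threshold = 0.3  # hypothetical threshold; only the first frame would be kept as memory
```
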
+ if self.num_maskmem == 0: # Disable memory and skip fusion + pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W) + return pix_feat + + num_obj_ptr_tokens = 0 + tpos_sign_mul = -1 if track_in_reverse else 1 + # Step 1: condition the visual features of the current frame on previous memories + if not is_init_cond_frame and use_prev_mem_frame: + # Retrieve the memories encoded with the maskmem backbone + to_cat_prompt, to_cat_prompt_mask, to_cat_prompt_pos_embed = [], [], [] + # Add conditioning frames's output first (all cond frames have t_pos=0 for + # when getting temporal positional embedding below) + assert len(output_dict["cond_frame_outputs"]) > 0 + # Select a maximum number of temporally closest cond frames for cross attention + cond_outputs = output_dict["cond_frame_outputs"] + selected_cond_outputs, unselected_cond_outputs = select_closest_cond_frames( + frame_idx, + cond_outputs, + self.max_cond_frames_in_attn, + keep_first_cond_frame=self.keep_first_cond_frame, + ) + t_pos_and_prevs = [ + ((frame_idx - t) * tpos_sign_mul, out, True) + for t, out in selected_cond_outputs.items() + ] + # Add last (self.num_maskmem - 1) frames before current frame for non-conditioning memory + # the earliest one has t_pos=1 and the latest one has t_pos=self.num_maskmem-1 + # We also allow taking the memory frame non-consecutively (with r>1), in which case + # we take (self.num_maskmem - 2) frames among every r-th frames plus the last frame. + r = 1 if self.training else self.memory_temporal_stride_for_eval + + if self.use_memory_selection: + valid_indices = self.frame_filter( + output_dict, track_in_reverse, frame_idx, num_frames, r + ) + + for t_pos in range(1, self.num_maskmem): + t_rel = self.num_maskmem - t_pos # how many frames before current frame + if self.use_memory_selection: + if t_rel > len(valid_indices): + continue + prev_frame_idx = valid_indices[-t_rel] + else: + if t_rel == 1: + # for t_rel == 1, we take the last frame (regardless of r) + if not track_in_reverse: + # the frame immediately before this frame (i.e. frame_idx - 1) + prev_frame_idx = frame_idx - t_rel + else: + # the frame immediately after this frame (i.e. frame_idx + 1) + prev_frame_idx = frame_idx + t_rel + else: + # for t_rel >= 2, we take the memory frame from every r-th frames + if not track_in_reverse: + # first find the nearest frame among every r-th frames before this frame + # for r=1, this would be (frame_idx - 2) + prev_frame_idx = ((frame_idx - 2) // r) * r + # then seek further among every r-th frames + prev_frame_idx = prev_frame_idx - (t_rel - 2) * r + else: + # first find the nearest frame among every r-th frames after this frame + # for r=1, this would be (frame_idx + 2) + prev_frame_idx = -(-(frame_idx + 2) // r) * r + # then seek further among every r-th frames + prev_frame_idx = prev_frame_idx + (t_rel - 2) * r + + out = output_dict["non_cond_frame_outputs"].get(prev_frame_idx, None) + if out is None: + # If an unselected conditioning frame is among the last (self.num_maskmem - 1) + # frames, we still attend to it as if it's a non-conditioning frame. + out = unselected_cond_outputs.get(prev_frame_idx, None) + t_pos_and_prevs.append((t_pos, out, False)) + + for t_pos, prev, is_selected_cond_frame in t_pos_and_prevs: + if prev is None: + continue # skip padding frames + # "maskmem_features" might have been offloaded to CPU in demo use cases, + # so we load it back to GPU (it's a no-op if it's already on GPU). 
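
The index arithmetic in this loop decides which past frames populate the spatial memory: the immediately preceding frame is always used, and the remaining slots walk backwards in strides of `r`. A worked example of the forward-tracking branch (without the optional SAM2Long frame filtering), with `num_maskmem=7` and two illustrative stride values:

```python
def non_cond_memory_frames(frame_idx, num_maskmem=7, r=2):
    """Replicates the forward-tracking branch of the frame-selection loop above."""
    frames = []
    for t_pos in range(1, num_maskmem):
        t_rel = num_maskmem - t_pos
        if t_rel == 1:
            prev_frame_idx = frame_idx - 1               # always include the last frame
        else:
            prev_frame_idx = ((frame_idx - 2) // r) * r  # largest multiple of r at or before frame_idx - 2
            prev_frame_idx -= (t_rel - 2) * r            # then step further back in strides of r
        frames.append((t_pos, prev_frame_idx))
    return frames

print(non_cond_memory_frames(frame_idx=20, r=1))
# [(1, 14), (2, 15), (3, 16), (4, 17), (5, 18), (6, 19)]
print(non_cond_memory_frames(frame_idx=20, r=2))
# [(1, 10), (2, 12), (3, 14), (4, 16), (5, 18), (6, 19)]
```
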
+ feats = prev["maskmem_features"].cuda(non_blocking=True) + seq_len = feats.shape[-2] * feats.shape[-1] + to_cat_prompt.append(feats.flatten(2).permute(2, 0, 1)) + to_cat_prompt_mask.append( + torch.zeros(B, seq_len, device=device, dtype=bool) + ) + # Spatial positional encoding (it might have been offloaded to CPU in eval) + maskmem_enc = prev["maskmem_pos_enc"][-1].cuda() + maskmem_enc = maskmem_enc.flatten(2).permute(2, 0, 1) + + if ( + is_selected_cond_frame + and getattr(self, "cond_frame_spatial_embedding", None) is not None + ): + # add a spatial embedding for the conditioning frame + maskmem_enc = maskmem_enc + self.cond_frame_spatial_embedding + + # Temporal positional encoding + t = t_pos if not is_selected_cond_frame else 0 + maskmem_enc = ( + maskmem_enc + self.maskmem_tpos_enc[self.num_maskmem - t - 1] + ) + to_cat_prompt_pos_embed.append(maskmem_enc) + + # Construct the list of past object pointers + # Optionally, select only a subset of spatial memory frames during trainining + if ( + self.training + and self.prob_to_dropout_spatial_mem > 0 + and self.rng.random() < self.prob_to_dropout_spatial_mem + ): + num_spatial_mem_keep = self.rng.integers(len(to_cat_prompt) + 1) + keep = self.rng.choice( + range(len(to_cat_prompt)), num_spatial_mem_keep, replace=False + ).tolist() + to_cat_prompt = [to_cat_prompt[i] for i in keep] + to_cat_prompt_mask = [to_cat_prompt_mask[i] for i in keep] + to_cat_prompt_pos_embed = [to_cat_prompt_pos_embed[i] for i in keep] + + max_obj_ptrs_in_encoder = min(num_frames, self.max_obj_ptrs_in_encoder) + # First add those object pointers from selected conditioning frames + # (optionally, only include object pointers in the past during evaluation) + if not self.training: + ptr_cond_outputs = { + t: out + for t, out in selected_cond_outputs.items() + if (t >= frame_idx if track_in_reverse else t <= frame_idx) + } + else: + ptr_cond_outputs = selected_cond_outputs + pos_and_ptrs = [ + # Temporal pos encoding contains how far away each pointer is from current frame + ( + (frame_idx - t) * tpos_sign_mul, + out["obj_ptr"], + True, # is_selected_cond_frame + ) + for t, out in ptr_cond_outputs.items() + ] + + # Add up to (max_obj_ptrs_in_encoder - 1) non-conditioning frames before current frame + for t_diff in range(1, max_obj_ptrs_in_encoder): + if not self.use_memory_selection: + t = frame_idx + t_diff if track_in_reverse else frame_idx - t_diff + if t < 0 or (num_frames is not None and t >= num_frames): + break + else: + if -t_diff <= -len(valid_indices): + break + t = valid_indices[-t_diff] + + out = output_dict["non_cond_frame_outputs"].get( + t, unselected_cond_outputs.get(t, None) + ) + if out is not None: + pos_and_ptrs.append((t_diff, out["obj_ptr"], False)) + + # If we have at least one object pointer, add them to the across attention + if len(pos_and_ptrs) > 0: + pos_list, ptrs_list, is_selected_cond_frame_list = zip(*pos_and_ptrs) + # stack object pointers along dim=0 into [ptr_seq_len, B, C] shape + obj_ptrs = torch.stack(ptrs_list, dim=0) + if getattr(self, "cond_frame_obj_ptr_embedding", None) is not None: + obj_ptrs = ( + obj_ptrs + + self.cond_frame_obj_ptr_embedding + * torch.tensor(is_selected_cond_frame_list, device=device)[ + ..., None, None + ].float() + ) + # a temporal positional embedding based on how far each object pointer is from + # the current frame (sine embedding normalized by the max pointer num). 
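
When the memory channel width `mem_dim` is smaller than the hidden size `C`, each object pointer is split into `C // mem_dim` shorter tokens right after this point so it can sit alongside the spatial memory tokens. A standalone shape check of that split, using illustrative sizes (`C=256`, `mem_dim=64`):

```python
import torch

num_ptrs, B, C, mem_dim = 5, 1, 256, 64
obj_ptrs = torch.randn(num_ptrs, B, C)       # one C-dim pointer per past frame
obj_pos = torch.randn(num_ptrs, B, mem_dim)  # matching temporal position encodings

# split every pointer into C // mem_dim tokens of width mem_dim
obj_ptrs = obj_ptrs.reshape(-1, B, C // mem_dim, mem_dim).permute(0, 2, 1, 3).flatten(0, 1)
# repeat the positional encoding so each sub-token keeps its frame's encoding
obj_pos = obj_pos.repeat_interleave(C // mem_dim, dim=0)

print(obj_ptrs.shape, obj_pos.shape)  # torch.Size([20, 1, 64]) torch.Size([20, 1, 64])
```
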
+ obj_pos = self._get_tpos_enc( + pos_list, + max_abs_pos=max_obj_ptrs_in_encoder, + device=device, + ) + # expand to batch size + obj_pos = obj_pos.unsqueeze(1).expand(-1, B, -1) + + if self.mem_dim < C: + # split a pointer into (C // self.mem_dim) tokens for self.mem_dim < C + obj_ptrs = obj_ptrs.reshape(-1, B, C // self.mem_dim, self.mem_dim) + obj_ptrs = obj_ptrs.permute(0, 2, 1, 3).flatten(0, 1) + obj_pos = obj_pos.repeat_interleave(C // self.mem_dim, dim=0) + to_cat_prompt.append(obj_ptrs) + to_cat_prompt_mask.append(None) # "to_cat_prompt_mask" is not used + to_cat_prompt_pos_embed.append(obj_pos) + num_obj_ptr_tokens = obj_ptrs.shape[0] + else: + num_obj_ptr_tokens = 0 + else: + # directly add no-mem embedding (instead of using the transformer encoder) + pix_feat_with_mem = current_vision_feats[-1] + self.no_mem_embed + pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W) + return pix_feat_with_mem + + # Use a dummy token on the first grame (to avoid emtpy memory input to tranformer encoder) + to_cat_prompt = [self.no_mem_embed.expand(1, B, self.mem_dim)] + to_cat_prompt_mask = [torch.zeros(B, 1, device=device, dtype=bool)] + to_cat_prompt_pos_embed = [self.no_mem_pos_enc.expand(1, B, self.mem_dim)] + + # Step 2: Concatenate the memories and forward through the transformer encoder + prompt = torch.cat(to_cat_prompt, dim=0) + prompt_mask = None # For now, we always masks are zeros anyways + prompt_pos_embed = torch.cat(to_cat_prompt_pos_embed, dim=0) + encoder_out = self.transformer.encoder( + src=current_vision_feats, + src_key_padding_mask=[None], + src_pos=current_vision_pos_embeds, + prompt=prompt, + prompt_pos=prompt_pos_embed, + prompt_key_padding_mask=prompt_mask, + feat_sizes=feat_sizes, + num_obj_ptr_tokens=num_obj_ptr_tokens, + ) + # reshape the output (HW)BC => BCHW + pix_feat_with_mem = encoder_out["memory"].permute(1, 2, 0).view(B, C, H, W) + return pix_feat_with_mem + + def _encode_new_memory( + self, + image, + current_vision_feats, + feat_sizes, + pred_masks_high_res, + object_score_logits, + is_mask_from_pts, + output_dict=None, + is_init_cond_frame=False, + ): + """Encode the current image and its prediction into a memory feature.""" + B = current_vision_feats[-1].size(1) # batch size on this frame + C = self.hidden_dim + H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size + # top-level feature, (HW)BC => BCHW + pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W) + if self.non_overlap_masks_for_mem_enc and not self.training: + # optionally, apply non-overlapping constraints to the masks (it's applied + # in the batch dimension and should only be used during eval, where all + # the objects come from the same video under batch size 1). 
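
In `_encode_new_memory`, whose body continues below, the predicted mask probabilities are stretched by `sigmoid_scale_for_mem_enc=20.0` and shifted by `sigmoid_bias_for_mem_enc=-10.0` (the values set in the constructor above), so a soft (0, 1) mask becomes a roughly (-10, +10) signal, consistent with the hard +/-10 convention used elsewhere. A quick check:

```python
import torch

sigmoid_scale_for_mem_enc, sigmoid_bias_for_mem_enc = 20.0, -10.0

pred_mask_logits = torch.tensor([-6.0, 0.0, 6.0])  # raw decoder logits for three pixels
mask_for_mem = torch.sigmoid(pred_mask_logits)     # (0, 1) probabilities
mask_for_mem = mask_for_mem * sigmoid_scale_for_mem_enc + sigmoid_bias_for_mem_enc
print(mask_for_mem)  # tensor([-9.9505,  0.0000,  9.9505]) -- confident pixels saturate near +/-10
```
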
+ pred_masks_high_res = self._apply_non_overlapping_constraints( + pred_masks_high_res + ) + # scale the raw mask logits with a temperature before applying sigmoid + if is_mask_from_pts and not self.training: + mask_for_mem = (pred_masks_high_res > 0).float() + else: + # apply sigmoid on the raw mask logits to turn them into range (0, 1) + mask_for_mem = torch.sigmoid(pred_masks_high_res) + # apply scale and bias terms to the sigmoid probabilities + if self.sigmoid_scale_for_mem_enc != 1.0: + mask_for_mem = mask_for_mem * self.sigmoid_scale_for_mem_enc + if self.sigmoid_bias_for_mem_enc != 0.0: + mask_for_mem = mask_for_mem + self.sigmoid_bias_for_mem_enc + + if isinstance(self.maskmem_backbone, SimpleMaskEncoder): + pix_feat = pix_feat.view_as(pix_feat) + maskmem_out = self.maskmem_backbone( + pix_feat, mask_for_mem, skip_mask_sigmoid=True + ) + else: + maskmem_out = self.maskmem_backbone(image, pix_feat, mask_for_mem) + # Clone the feats and pos_enc to enable compilation + maskmem_features = self._maybe_clone(maskmem_out["vision_features"]) + maskmem_pos_enc = [self._maybe_clone(m) for m in maskmem_out["vision_pos_enc"]] + # add a no-object embedding to the spatial memory to indicate that the frame + # is predicted to be occluded (i.e. no object is appearing in the frame) + is_obj_appearing = (object_score_logits > 0).float() + maskmem_features += ( + 1 - is_obj_appearing[..., None, None] + ) * self.no_obj_embed_spatial[..., None, None].expand(*maskmem_features.shape) + + return maskmem_features, maskmem_pos_enc + + def forward_tracking(self, backbone_out, input, return_dict=False): + """Forward video tracking on each frame (and sample correction clicks).""" + img_feats_already_computed = backbone_out["backbone_fpn"] is not None + if img_feats_already_computed: + # Prepare the backbone features + # - vision_feats and vision_pos_embeds are in (HW)BC format + ( + _, + vision_feats, + vision_pos_embeds, + feat_sizes, + ) = self._prepare_backbone_features(backbone_out) + + # Starting the stage loop + num_frames = backbone_out["num_frames"] + init_cond_frames = backbone_out["init_cond_frames"] + frames_to_add_correction_pt = backbone_out["frames_to_add_correction_pt"] + # first process all the initial conditioning frames to encode them as memory, + # and then conditioning on them to track the remaining frames + processing_order = init_cond_frames + backbone_out["frames_not_in_init_cond"] + output_dict = { + "cond_frame_outputs": {}, # dict containing {frame_idx: } + "non_cond_frame_outputs": {}, # dict containing {frame_idx: } + } + for stage_id in processing_order: + # Get the image features for the current frames + img_ids = input.find_inputs[stage_id].img_ids + if img_feats_already_computed: + # Retrieve image features according to img_ids (if they are already computed). + current_image = input.img_batch[img_ids] + current_vision_feats = [x[:, img_ids] for x in vision_feats] + current_vision_pos_embeds = [x[:, img_ids] for x in vision_pos_embeds] + else: + # Otherwise, compute the image features on the fly for the given img_ids + # (this might be used for evaluation on long videos to avoid backbone OOM). 
+ ( + current_image, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + ) = self._prepare_backbone_features_per_frame(input.img_batch, img_ids) + # Get output masks based on this frame's prompts and previous memory + current_out = self.track_step( + frame_idx=stage_id, + is_init_cond_frame=stage_id in init_cond_frames, + current_vision_feats=current_vision_feats, + current_vision_pos_embeds=current_vision_pos_embeds, + feat_sizes=feat_sizes, + image=current_image, + point_inputs=backbone_out["point_inputs_per_frame"].get(stage_id, None), + mask_inputs=backbone_out["mask_inputs_per_frame"].get(stage_id, None), + gt_masks=backbone_out["gt_masks_per_frame"].get(stage_id, None), + frames_to_add_correction_pt=frames_to_add_correction_pt, + output_dict=output_dict, + num_frames=num_frames, + ) + # Append the output, depending on whether it's a conditioning frame + add_output_as_cond_frame = stage_id in init_cond_frames or ( + self.add_all_frames_to_correct_as_cond + and stage_id in frames_to_add_correction_pt + ) + if add_output_as_cond_frame: + output_dict["cond_frame_outputs"][stage_id] = current_out + else: + output_dict["non_cond_frame_outputs"][stage_id] = current_out + + if return_dict: + return output_dict + # turn `output_dict` into a list for loss function + all_frame_outputs = {} + all_frame_outputs.update(output_dict["cond_frame_outputs"]) + all_frame_outputs.update(output_dict["non_cond_frame_outputs"]) + all_frame_outputs = [all_frame_outputs[t] for t in range(num_frames)] + # Make DDP happy with activation checkpointing by removing unused keys + all_frame_outputs = [ + {k: v for k, v in d.items() if k != "obj_ptr"} for d in all_frame_outputs + ] + + return all_frame_outputs + + def track_step( + self, + frame_idx, + is_init_cond_frame, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + image, + point_inputs, + mask_inputs, + output_dict, + num_frames, + track_in_reverse=False, # tracking in reverse time order (for demo usage) + # Whether to run the memory encoder on the predicted masks. Sometimes we might want + # to skip the memory encoder with `run_mem_encoder=False`. For example, + # in demo we might call `track_step` multiple times for each user click, + # and only encode the memory when the user finalizes their clicks. And in ablation + # settings like SAM training on static images, we don't need the memory encoder. + run_mem_encoder=True, + # The previously predicted SAM mask logits (which can be fed together with new clicks in demo). + prev_sam_mask_logits=None, + use_prev_mem_frame=True, + ): + current_out = {"point_inputs": point_inputs, "mask_inputs": mask_inputs} + # High-resolution feature maps for the SAM head, reshape (HW)BC => BCHW + if len(current_vision_feats) > 1: + high_res_features = [ + x.permute(1, 2, 0).view(x.size(1), x.size(2), *s) + for x, s in zip(current_vision_feats[:-1], feat_sizes[:-1]) + ] + else: + high_res_features = None + if mask_inputs is not None: + # (see it as a GT mask) without using a SAM prompt encoder + mask decoder. 
+ pix_feat = current_vision_feats[-1].permute(1, 2, 0) + pix_feat = pix_feat.view(-1, self.hidden_dim, *feat_sizes[-1]) + sam_outputs = self._use_mask_as_output( + pix_feat, high_res_features, mask_inputs + ) + else: + # fused the visual feature with previous memory features in the memory bank + pix_feat_with_mem = self._prepare_memory_conditioned_features( + frame_idx=frame_idx, + is_init_cond_frame=is_init_cond_frame, + current_vision_feats=current_vision_feats[-1:], + current_vision_pos_embeds=current_vision_pos_embeds[-1:], + feat_sizes=feat_sizes[-1:], + output_dict=output_dict, + num_frames=num_frames, + track_in_reverse=track_in_reverse, + use_prev_mem_frame=use_prev_mem_frame, + ) + # apply SAM-style segmentation head + # here we might feed previously predicted low-res SAM mask logits into the SAM mask decoder, + # e.g. in demo where such logits come from earlier interaction instead of correction sampling + # (in this case, the SAM mask decoder should have `self.iter_use_prev_mask_pred=True`, and + # any `mask_inputs` shouldn't reach here as they are sent to _use_mask_as_output instead) + if prev_sam_mask_logits is not None: + assert self.iter_use_prev_mask_pred + assert point_inputs is not None and mask_inputs is None + mask_inputs = prev_sam_mask_logits + multimask_output = self._use_multimask(is_init_cond_frame, point_inputs) + sam_outputs = self._forward_sam_heads( + backbone_features=pix_feat_with_mem, + point_inputs=point_inputs, + mask_inputs=mask_inputs, + high_res_features=high_res_features, + multimask_output=multimask_output, + ) + ( + _, + high_res_multimasks, + ious, + low_res_masks, + high_res_masks, + obj_ptr, + object_score_logits, + ) = sam_outputs + # Use the final prediction (after all correction steps for output and eval) + current_out["pred_masks"] = low_res_masks + current_out["pred_masks_high_res"] = high_res_masks + current_out["obj_ptr"] = obj_ptr + if self.use_memory_selection: + current_out["object_score_logits"] = object_score_logits + iou_score = ious.max(-1)[0] + current_out["iou_score"] = iou_score + current_out["eff_iou_score"] = self.cal_mem_score( + object_score_logits, iou_score + ) + if not self.training: + # Only add this in inference (to avoid unused param in activation checkpointing; + # it's mainly used in the demo to encode spatial memories w/ consolidated masks) + current_out["object_score_logits"] = object_score_logits + + # Finally run the memory encoder on the predicted mask to encode + # it into a new memory feature (that can be used in future frames) + # (note that `self.num_maskmem == 0` is primarily used for reproducing SAM on + # images, in which case we'll just skip memory encoder to save compute). 
+ if run_mem_encoder and self.num_maskmem > 0: + high_res_masks_for_mem_enc = high_res_masks + maskmem_features, maskmem_pos_enc = self._encode_new_memory( + image=image, + current_vision_feats=current_vision_feats, + feat_sizes=feat_sizes, + pred_masks_high_res=high_res_masks_for_mem_enc, + object_score_logits=object_score_logits, + is_mask_from_pts=(point_inputs is not None), + output_dict=output_dict, + is_init_cond_frame=is_init_cond_frame, + ) + current_out["maskmem_features"] = maskmem_features + current_out["maskmem_pos_enc"] = maskmem_pos_enc + else: + current_out["maskmem_features"] = None + current_out["maskmem_pos_enc"] = None + + # Optionally, offload the outputs to CPU memory during evaluation to avoid + # GPU OOM on very long videos or very large resolution or too many objects + if self.offload_output_to_cpu_for_eval and not self.training: + # Here we only keep those keys needed for evaluation to get a compact output + trimmed_out = { + "pred_masks": current_out["pred_masks"].cpu(), + "pred_masks_high_res": current_out["pred_masks_high_res"].cpu(), + # other items for evaluation (these are small tensors so we keep them on GPU) + "obj_ptr": current_out["obj_ptr"], + "object_score_logits": current_out["object_score_logits"], + } + if run_mem_encoder and self.num_maskmem > 0: + trimmed_out["maskmem_features"] = maskmem_features.cpu() + trimmed_out["maskmem_pos_enc"] = [x.cpu() for x in maskmem_pos_enc] + if self.use_memory_selection: + trimmed_out["iou_score"] = current_out["iou_score"].cpu() + trimmed_out["eff_iou_score"] = current_out["eff_iou_score"].cpu() + current_out = trimmed_out + + # Optionally, trim the output of past non-conditioning frame (r * num_maskmem frames + # before the current frame) during evaluation. This is intended to save GPU or CPU + # memory for semi-supervised VOS eval, where only the first frame receives prompts. 
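
The trimming described in this comment (implemented by the `_trim_past_out` helper that follows) keeps only the small tensors that later frames may still read and drops the bulky memory features once a frame falls far enough behind. A minimal sketch of its effect on a stored per-frame output; the tensor shapes are dummies:

```python
import torch

full_out = {
    "pred_masks": torch.zeros(1, 1, 256, 256),
    "pred_masks_high_res": torch.zeros(1, 1, 1024, 1024),
    "obj_ptr": torch.zeros(1, 256),
    "object_score_logits": torch.zeros(1, 1),
    "maskmem_features": torch.zeros(1, 64, 64, 64),    # the large spatial memory tensor
    "maskmem_pos_enc": [torch.zeros(1, 64, 64, 64)],
}

def trim_past_out(past_out):
    # keep only what future frames can still consume from an old, non-conditioning output
    return {
        "pred_masks": past_out["pred_masks"],
        "obj_ptr": past_out["obj_ptr"],
        "object_score_logits": past_out["object_score_logits"],
    }

print(sorted(trim_past_out(full_out)))  # ['obj_ptr', 'object_score_logits', 'pred_masks']
```
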
+ def _trim_past_out(past_out, current_out): + if past_out is None: + return None + return { + "pred_masks": past_out["pred_masks"], + "obj_ptr": past_out["obj_ptr"], + "object_score_logits": past_out["object_score_logits"], + } + + if self.trim_past_non_cond_mem_for_eval and not self.training: + r = self.memory_temporal_stride_for_eval + past_frame_idx = frame_idx - r * self.num_maskmem + past_out = output_dict["non_cond_frame_outputs"].get(past_frame_idx, None) + + if past_out is not None: + print(past_out.get("eff_iou_score", 0)) + if ( + self.use_memory_selection + and past_out.get("eff_iou_score", 0) < self.mf_threshold + ) or not self.use_memory_selection: + output_dict["non_cond_frame_outputs"][past_frame_idx] = ( + _trim_past_out(past_out, current_out) + ) + + if ( + self.use_memory_selection and not self.offload_output_to_cpu_for_eval + ): ## design for memory selection, trim too old frames to save memory + far_old_frame_idx = frame_idx - 20 * self.max_obj_ptrs_in_encoder + past_out = output_dict["non_cond_frame_outputs"].get( + far_old_frame_idx, None + ) + if past_out is not None: + output_dict["non_cond_frame_outputs"][far_old_frame_idx] = ( + _trim_past_out(past_out, current_out) + ) + + return current_out + + def _use_multimask(self, is_init_cond_frame, point_inputs): + """Whether to use multimask output in the SAM head.""" + num_pts = 0 if point_inputs is None else point_inputs["point_labels"].size(1) + multimask_output = ( + self.multimask_output_in_sam + and (is_init_cond_frame or self.multimask_output_for_tracking) + and (self.multimask_min_pt_num <= num_pts <= self.multimask_max_pt_num) + ) + return multimask_output + + def _apply_non_overlapping_constraints(self, pred_masks): + """ + Apply non-overlapping constraints to the object scores in pred_masks. Here we + keep only the highest scoring object at each spatial location in pred_masks. + """ + batch_size = pred_masks.size(0) + if batch_size == 1: + return pred_masks + + device = pred_masks.device + # "max_obj_inds": object index of the object with the highest score at each location + max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True) + # "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks` + batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None] + keep = max_obj_inds == batch_obj_inds + # suppress overlapping regions' scores below -10.0 so that the foreground regions + # don't overlap (here sigmoid(-10.0)=4.5398e-05) + pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0)) + return pred_masks + + def _compile_all_components(self): + """Compile all model components for faster inference.""" + # a larger cache size to hold varying number of shapes for torch.compile + # see https://github.com/pytorch/pytorch/blob/v2.5.1/torch/_dynamo/config.py#L42-L49 + torch._dynamo.config.cache_size_limit = 64 + torch._dynamo.config.accumulated_cache_size_limit = 2048 + from sam3.perflib.compile import compile_wrapper + + logging.info("Compiling all components. First time may be very slow.") + + self.maskmem_backbone.forward = compile_wrapper( + self.maskmem_backbone.forward, + mode="max-autotune", + fullgraph=True, + dynamic=False, + ) + self.transformer.encoder.forward = compile_wrapper( + self.transformer.encoder.forward, + mode="max-autotune", + fullgraph=True, + dynamic=True, # Num. 
of memories varies + ) + # We disable compilation of sam_prompt_encoder as it sometimes gives a large accuracy regression, + # especially when sam_mask_prompt (previous mask logits) is not None + # self.sam_prompt_encoder.forward = torch.compile( + # self.sam_prompt_encoder.forward, + # mode="max-autotune", + # fullgraph=True, + # dynamic=False, # Accuracy regression on True + # ) + self.sam_mask_decoder.forward = compile_wrapper( + self.sam_mask_decoder.forward, + mode="max-autotune", + fullgraph=True, + dynamic=False, # Accuracy regression on True + ) + + def _maybe_clone(self, x): + """Clone a tensor if and only if `self.compile_all_components` is True.""" + return x.clone() if self.compile_all_components else x + + +def concat_points(old_point_inputs, new_points, new_labels): + """Add new points and labels to previous point inputs (add at the end).""" + if old_point_inputs is None: + points, labels = new_points, new_labels + else: + points = torch.cat([old_point_inputs["point_coords"], new_points], dim=1) + labels = torch.cat([old_point_inputs["point_labels"], new_labels], dim=1) + + return {"point_coords": points, "point_labels": labels} diff --git a/detect_tools/sam3/sam3/model/sam3_tracker_utils.py b/detect_tools/sam3/sam3/model/sam3_tracker_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7afc70aab716b4ad22370350ac836fbc46c9012f --- /dev/null +++ b/detect_tools/sam3/sam3/model/sam3_tracker_utils.py @@ -0,0 +1,427 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import numpy as np +import torch +import torch.nn.functional as F +from numpy.typing import NDArray + +from sam3.model.edt import edt_triton + + +def sample_box_points( + masks: torch.Tensor, + noise: float = 0.1, # SAM default + noise_bound: int = 20, # SAM default + top_left_label: int = 2, + bottom_right_label: int = 3, +) -> tuple[NDArray, NDArray]: + """ + Sample a noised version of the top left and bottom right corners of a given `bbox` + + Inputs: + - masks: [B, 1, H, W] tensor + - noise: noise as a fraction of box width and height, dtype=float + - noise_bound: maximum amount of noise (in pure pixels), dtype=int + + Returns: + - box_coords: [B, num_pt, 2], contains (x, y) coordinates of top left and bottom right box corners, dtype=torch.float + - box_labels: [B, num_pt], label 2 is reserverd for top left and 3 for bottom right corners, dtype=torch.int32 + """ + device = masks.device + box_coords = mask_to_box(masks) + B, _, H, W = masks.shape + box_labels = torch.tensor( + [top_left_label, bottom_right_label], dtype=torch.int, device=device + ).repeat(B) + if noise > 0.0: + if not isinstance(noise_bound, torch.Tensor): + noise_bound = torch.tensor(noise_bound, device=device) + bbox_w = box_coords[..., 2] - box_coords[..., 0] + bbox_h = box_coords[..., 3] - box_coords[..., 1] + max_dx = torch.min(bbox_w * noise, noise_bound) + max_dy = torch.min(bbox_h * noise, noise_bound) + box_noise = 2 * torch.rand(B, 1, 4, device=device) - 1 + box_noise = box_noise * torch.stack((max_dx, max_dy, max_dx, max_dy), dim=-1) + + box_coords = box_coords + box_noise + img_bounds = ( + torch.tensor([W, H, W, H], device=device) - 1 + ) # uncentered pixel coords + box_coords.clamp_(torch.zeros_like(img_bounds), img_bounds) # In place clamping + + box_coords = box_coords.reshape(-1, 2, 2) # always 2 points + box_labels = box_labels.reshape(-1, 2) + return box_coords, box_labels + + +def mask_to_box(masks: torch.Tensor): + """ + compute bounding box given an input mask + + Inputs: + 
- masks: [B, 1, H, W] tensor + + Returns: + - box_coords: [B, 1, 4], contains (x, y) coordinates of top left and bottom right box corners, dtype=torch.Tensor + """ + B, _, h, w = masks.shape + device = masks.device + mask_area = masks.sum(dim=(-1, -2)) + xs = torch.arange(w, device=device, dtype=torch.int32) + ys = torch.arange(h, device=device, dtype=torch.int32) + grid_xs, grid_ys = torch.meshgrid(xs, ys, indexing="xy") + grid_xs = grid_xs[None, None, ...].expand(B, 1, h, w) + grid_ys = grid_ys[None, None, ...].expand(B, 1, h, w) + min_xs, _ = torch.min(torch.where(masks, grid_xs, w).flatten(-2), dim=-1) + max_xs, _ = torch.max(torch.where(masks, grid_xs, -1).flatten(-2), dim=-1) + min_ys, _ = torch.min(torch.where(masks, grid_ys, h).flatten(-2), dim=-1) + max_ys, _ = torch.max(torch.where(masks, grid_ys, -1).flatten(-2), dim=-1) + bbox_coords = torch.stack((min_xs, min_ys, max_xs, max_ys), dim=-1) + bbox_coords = torch.where( + mask_area[..., None] > 0, bbox_coords, torch.zeros_like(bbox_coords) + ) + return bbox_coords + + +def sample_random_points_from_errors(gt_masks, pred_masks, num_pt=1): + """ + Sample `num_pt` random points (along with their labels) independently from the error regions. + + Inputs: + - gt_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool + - pred_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool or None + - num_pt: int, number of points to sample independently for each of the B error maps + + Outputs: + - points: [B, num_pt, 2], dtype=torch.float, contains (x, y) coordinates of each sampled point + - labels: [B, num_pt], dtype=torch.int32, where 1 means positive clicks and 0 means + negative clicks + """ + if pred_masks is None: # if pred_masks is not provided, treat it as empty + pred_masks = torch.zeros_like(gt_masks) + assert gt_masks.dtype == torch.bool and gt_masks.size(1) == 1 + assert pred_masks.dtype == torch.bool and pred_masks.shape == gt_masks.shape + assert num_pt >= 0 + + B, _, H_im, W_im = gt_masks.shape + device = gt_masks.device + + # false positive region, a new point sampled in this region should have + # negative label to correct the FP error + fp_masks = ~gt_masks & pred_masks + # false negative region, a new point sampled in this region should have + # positive label to correct the FN error + fn_masks = gt_masks & ~pred_masks + # whether the prediction completely match the ground-truth on each mask + all_correct = torch.all((gt_masks == pred_masks).flatten(2), dim=2) + all_correct = all_correct[..., None, None] + + # channel 0 is FP map, while channel 1 is FN map + pts_noise = torch.rand(B, num_pt, H_im, W_im, 2, device=device) + # sample a negative new click from FP region or a positive new click + # from FN region, depend on where the maximum falls, + # and in case the predictions are all correct (no FP or FN), we just + # sample a negative click from the background region + pts_noise[..., 0] *= fp_masks | (all_correct & ~gt_masks) + pts_noise[..., 1] *= fn_masks + pts_idx = pts_noise.flatten(2).argmax(dim=2) + labels = (pts_idx % 2).to(torch.int32) + pts_idx = pts_idx // 2 + pts_x = pts_idx % W_im + pts_y = pts_idx // W_im + points = torch.stack([pts_x, pts_y], dim=2).to(torch.float) + return points, labels + + +def sample_one_point_from_error_center(gt_masks, pred_masks, padding=True): + """ + Sample 1 random point (along with its label) from the center of each error region, + that is, the point with the largest distance to the boundary of each error region. 
+ This is the RITM sampling method from https://github.com/saic-vul/ritm_interactive_segmentation/blob/master/isegm/inference/clicker.py + + Inputs: + - gt_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool + - pred_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool or None + - padding: if True, pad with boundary of 1 px for distance transform + + Outputs: + - points: [B, 1, 2], dtype=torch.float, contains (x, y) coordinates of each sampled point + - labels: [B, 1], dtype=torch.int32, where 1 means positive clicks and 0 means negative clicks + """ + if pred_masks is None: + pred_masks = torch.zeros_like(gt_masks) + assert gt_masks.dtype == torch.bool and gt_masks.size(1) == 1 + assert pred_masks.dtype == torch.bool and pred_masks.shape == gt_masks.shape + + B, _, H, W = gt_masks.shape + + # false positive region, a new point sampled in this region should have + # negative label to correct the FP error + fp_masks = (~gt_masks & pred_masks).squeeze(1) + # false negative region, a new point sampled in this region should have + # positive label to correct the FN error + fn_masks = (gt_masks & ~pred_masks).squeeze(1) + + if padding: + padded_fp_masks = torch.zeros( + B, H + 2, W + 2, dtype=fp_masks.dtype, device=fp_masks.device + ) + padded_fp_masks[:, 1 : H + 1, 1 : W + 1] = fp_masks + padded_fn_masks = torch.zeros( + B, H + 2, W + 2, dtype=fp_masks.dtype, device=fp_masks.device + ) + padded_fn_masks[:, 1 : H + 1, 1 : W + 1] = fn_masks + else: + padded_fp_masks = fp_masks + padded_fn_masks = fn_masks + + fn_mask_dt = edt_triton(padded_fn_masks) + fp_mask_dt = edt_triton(padded_fp_masks) + if padding: + fn_mask_dt = fn_mask_dt[:, 1:-1, 1:-1] + fp_mask_dt = fp_mask_dt[:, 1:-1, 1:-1] + + fn_max, fn_argmax = fn_mask_dt.reshape(B, -1).max(dim=-1) + fp_max, fp_argmax = fp_mask_dt.reshape(B, -1).max(dim=-1) + is_positive = fn_max > fp_max + chosen = torch.where(is_positive, fn_argmax, fp_argmax) + points_x = chosen % W + points_y = chosen // W + + labels = is_positive.long() + points = torch.stack([points_x, points_y], -1) + return points.unsqueeze(1), labels.unsqueeze(1) + + +def sample_one_point_from_error_center_slow(gt_masks, pred_masks, padding=True): + """ + Sample 1 random point (along with its label) from the center of each error region, + that is, the point with the largest distance to the boundary of each error region. 
+ This is the RITM sampling method from https://github.com/saic-vul/ritm_interactive_segmentation/blob/master/isegm/inference/clicker.py + + Inputs: + - gt_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool + - pred_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool or None + - padding: if True, pad with boundary of 1 px for distance transform + + Outputs: + - points: [B, 1, 2], dtype=torch.float, contains (x, y) coordinates of each sampled point + - labels: [B, 1], dtype=torch.int32, where 1 means positive clicks and 0 means negative clicks + """ + import cv2 # delay OpenCV import to avoid unnecessary dependency + + if pred_masks is None: + pred_masks = torch.zeros_like(gt_masks) + assert gt_masks.dtype == torch.bool and gt_masks.size(1) == 1 + assert pred_masks.dtype == torch.bool and pred_masks.shape == gt_masks.shape + + B, _, _, W_im = gt_masks.shape + device = gt_masks.device + + # false positive region, a new point sampled in this region should have + # negative label to correct the FP error + fp_masks = ~gt_masks & pred_masks + # false negative region, a new point sampled in this region should have + # positive label to correct the FN error + fn_masks = gt_masks & ~pred_masks + + fp_masks = fp_masks.cpu().numpy() + fn_masks = fn_masks.cpu().numpy() + points = torch.zeros(B, 1, 2, dtype=torch.float) + labels = torch.ones(B, 1, dtype=torch.int32) + for b in range(B): + fn_mask = fn_masks[b, 0] + fp_mask = fp_masks[b, 0] + if padding: + fn_mask = np.pad(fn_mask, ((1, 1), (1, 1)), "constant") + fp_mask = np.pad(fp_mask, ((1, 1), (1, 1)), "constant") + # compute the distance of each point in FN/FP region to its boundary + fn_mask_dt = cv2.distanceTransform(fn_mask.astype(np.uint8), cv2.DIST_L2, 0) + fp_mask_dt = cv2.distanceTransform(fp_mask.astype(np.uint8), cv2.DIST_L2, 0) + if padding: + fn_mask_dt = fn_mask_dt[1:-1, 1:-1] + fp_mask_dt = fp_mask_dt[1:-1, 1:-1] + + # take the point in FN/FP region with the largest distance to its boundary + fn_mask_dt_flat = fn_mask_dt.reshape(-1) + fp_mask_dt_flat = fp_mask_dt.reshape(-1) + fn_argmax = np.argmax(fn_mask_dt_flat) + fp_argmax = np.argmax(fp_mask_dt_flat) + is_positive = fn_mask_dt_flat[fn_argmax] > fp_mask_dt_flat[fp_argmax] + pt_idx = fn_argmax if is_positive else fp_argmax + points[b, 0, 0] = pt_idx % W_im # x + points[b, 0, 1] = pt_idx // W_im # y + labels[b, 0] = int(is_positive) + + points = points.to(device) + labels = labels.to(device) + return points, labels + + +def get_next_point(gt_masks, pred_masks, method): + if method == "uniform": + return sample_random_points_from_errors(gt_masks, pred_masks) + elif method == "center": + return sample_one_point_from_error_center(gt_masks, pred_masks) + else: + raise ValueError(f"unknown sampling method {method}") + + +def select_closest_cond_frames( + frame_idx, cond_frame_outputs, max_cond_frame_num, keep_first_cond_frame=False +): + """ + Select up to `max_cond_frame_num` conditioning frames from `cond_frame_outputs` + that are temporally closest to the current frame at `frame_idx`. Here, we take + - a) the closest conditioning frame before `frame_idx` (if any); + - b) the closest conditioning frame after `frame_idx` (if any); + - c) any other temporally closest conditioning frames until reaching a total + of `max_cond_frame_num` conditioning frames. + + Outputs: + - selected_outputs: selected items (keys & values) from `cond_frame_outputs`. + - unselected_outputs: items (keys & values) not selected in `cond_frame_outputs`. 
+ """ + if max_cond_frame_num == -1 or len(cond_frame_outputs) <= max_cond_frame_num: + selected_outputs = cond_frame_outputs + unselected_outputs = {} + else: + assert max_cond_frame_num >= 2, "we should allow using 2+ conditioning frames" + selected_outputs = {} + if keep_first_cond_frame: + idx_first = min( + (t for t in cond_frame_outputs if t < frame_idx), default=None + ) + if idx_first is None: + # Maybe we are tracking in reverse + idx_first = max( + (t for t in cond_frame_outputs if t > frame_idx), default=None + ) + if idx_first is not None: + selected_outputs[idx_first] = cond_frame_outputs[idx_first] + # the closest conditioning frame before `frame_idx` (if any) + idx_before = max((t for t in cond_frame_outputs if t < frame_idx), default=None) + if idx_before is not None: + selected_outputs[idx_before] = cond_frame_outputs[idx_before] + + # the closest conditioning frame after `frame_idx` (if any) + idx_after = min((t for t in cond_frame_outputs if t >= frame_idx), default=None) + if idx_after is not None: + selected_outputs[idx_after] = cond_frame_outputs[idx_after] + + # add other temporally closest conditioning frames until reaching a total + # of `max_cond_frame_num` conditioning frames. + num_remain = max_cond_frame_num - len(selected_outputs) + inds_remain = sorted( + (t for t in cond_frame_outputs if t not in selected_outputs), + key=lambda x: abs(x - frame_idx), + )[:num_remain] + selected_outputs.update((t, cond_frame_outputs[t]) for t in inds_remain) + unselected_outputs = { + t: v for t, v in cond_frame_outputs.items() if t not in selected_outputs + } + + return selected_outputs, unselected_outputs + + +def get_1d_sine_pe(pos_inds, dim, temperature=10000): + """ + Get 1D sine positional embedding as in the original Transformer paper. + """ + pe_dim = dim // 2 + dim_t = torch.arange(pe_dim, dtype=torch.float32, device=pos_inds.device) + dim_t = temperature ** (2 * (dim_t // 2) / pe_dim) + + pos_embed = pos_inds.unsqueeze(-1) / dim_t + pos_embed = torch.cat([pos_embed.sin(), pos_embed.cos()], dim=-1) + return pos_embed + + +def get_best_gt_match_from_multimasks(pred_multimasks, gt_masks, pred_scores=None): + """ + Get the mask with the best match to GT masks (based on IoU) from pred_multimasks. + Optionally, use `pred_scores` to break ties in case all IoUs are zeros. + """ + assert pred_multimasks.ndim == 4 and gt_masks.ndim == 4 + if pred_multimasks.size(1) == 1: + return pred_multimasks # only a single mask channel, nothing to select + + pred_multimasks_binary = pred_multimasks > 0 + area_i = torch.sum(pred_multimasks_binary & gt_masks, dim=(2, 3)).float() + area_u = torch.sum(pred_multimasks_binary | gt_masks, dim=(2, 3)).float() + ious = area_i / torch.clamp(area_u, min=1.0) + + # In case all IoUs are zeros (e.g. because the GT mask is empty), use pred_scores + # to break ties and select the best mask + if pred_scores is not None: + has_nonzero_ious = torch.any(ious > 0).expand_as(ious) + scores = torch.where(has_nonzero_ious, ious, pred_scores) + else: + scores = ious + + # Finally, take the best mask prediction (with the highest score) + best_scores_inds = torch.argmax(scores, dim=-1) + batch_inds = torch.arange(scores.size(0), device=scores.device) + best_pred_mask = pred_multimasks[batch_inds, best_scores_inds].unsqueeze(1) + return best_pred_mask + + +def fill_holes_in_mask_scores(mask, max_area, fill_holes=True, remove_sprinkles=True): + """ + A post processor to fill small holes in mask scores with area under `max_area`. 
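
`select_closest_cond_frames`, defined just above, caps how many conditioning frames the memory attention sees by keeping the nearest frame on each side of the current one and filling the remaining slots by temporal distance. A small usage example; it assumes the `sam3` package from this diff is importable, and the string values stand in for per-frame output dicts:

```python
from sam3.model.sam3_tracker_utils import select_closest_cond_frames

cond_frame_outputs = {0: "out@0", 5: "out@5", 12: "out@12", 30: "out@30"}
selected, unselected = select_closest_cond_frames(
    frame_idx=10, cond_frame_outputs=cond_frame_outputs, max_cond_frame_num=3
)
print(sorted(selected))    # [0, 5, 12] -- closest frame before (5), after (12), then next closest (0)
print(sorted(unselected))  # [30]
```
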
+ Holes are those small connected components in either background or foreground. + + Note that it relies on the "cc_torch" package to find connected components fast. You can + install it via the following command (`TORCH_CUDA_ARCH_LIST=8.0` is for A100 GPUs): + ``` + pip uninstall -y cc_torch; TORCH_CUDA_ARCH_LIST=8.0 9.0 pip install git+https://github.com/ronghanghu/cc_torch + ``` + Otherwise, it will fallback to a slightly slower triton implementation, or skimage if the tensor is on cpu + """ + + if max_area <= 0: + return mask # nothing to fill in this case + + if fill_holes: + # We remove small connected components in background by changing them to foreground + # with a small positive mask score (0.1). + mask_bg = mask <= 0 + bg_area_thresh = max_area + _, areas_bg = _get_connected_components_with_padding(mask_bg) + small_components_bg = mask_bg & (areas_bg <= bg_area_thresh) + mask = torch.where(small_components_bg, 0.1, mask) + + if remove_sprinkles: + # We remove small connected components in foreground by changing them to background + # with a small negative mask score (-0.1). Here we only remove connected components + # whose areas are under both `max_area` and half of the entire mask's area. This + # removes sprinkles while avoids filtering out tiny objects that we want to track. + mask_fg = mask > 0 + fg_area_thresh = torch.sum(mask_fg, dim=(2, 3), keepdim=True, dtype=torch.int32) + fg_area_thresh.floor_divide_(2).clamp_(max=max_area) + _, areas_fg = _get_connected_components_with_padding(mask_fg) + small_components_fg = mask_fg & (areas_fg <= fg_area_thresh) + mask = torch.where(small_components_fg, -0.1, mask) + return mask + + +def _get_connected_components_with_padding(mask): + """Get connected components from masks (possibly padding them to an even size).""" + from sam3.perflib.connected_components import connected_components + + mask = mask.to(torch.uint8) + _, _, H, W = mask.shape + # make sure both height and width are even (to be compatible with cc_torch) + pad_h = H % 2 + pad_w = W % 2 + if pad_h == 0 and pad_w == 0: + labels, counts = connected_components(mask) + else: + # pad the mask to make its height and width even + # padding format is (padding_left,padding_right,padding_top,padding_bottom) + mask_pad = F.pad(mask, (0, pad_w, 0, pad_h), mode="constant", value=0) + labels, counts = connected_components(mask_pad) + labels = labels[:, :, :H, :W] + counts = counts[:, :, :H, :W] + + return labels, counts diff --git a/detect_tools/sam3/sam3/model/sam3_tracking_predictor.py b/detect_tools/sam3/sam3/model/sam3_tracking_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..28ab2bd32c4c19f7f2fa93b5360f24485c65ca2d --- /dev/null +++ b/detect_tools/sam3/sam3/model/sam3_tracking_predictor.py @@ -0,0 +1,1370 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import logging +from collections import OrderedDict + +import torch + +from sam3.model.sam3_tracker_base import concat_points, NO_OBJ_SCORE, Sam3TrackerBase +from sam3.model.sam3_tracker_utils import fill_holes_in_mask_scores +from sam3.model.utils.sam2_utils import load_video_frames +from tqdm.auto import tqdm + + +class Sam3TrackerPredictor(Sam3TrackerBase): + """ + The demo class that extends the `Sam3TrackerBase` to handle user interactions + and manage inference states, with support for multi-object tracking. 
+ """ + + def __init__( + self, + # whether to clear non-conditioning memory of the surrounding frames (which may contain outdated information) after adding correction clicks; + # note that this would only apply to *single-object tracking* unless `clear_non_cond_mem_for_multi_obj` is also set to True) + clear_non_cond_mem_around_input=False, + # whether to also clear non-conditioning memory of the surrounding frames (only effective when `clear_non_cond_mem_around_input` is True). + clear_non_cond_mem_for_multi_obj=False, + # if fill_hole_area > 0, we fill small holes in the final masks up to this area (after resizing them to the original video resolution) + fill_hole_area=0, + # if always_start_from_first_ann_frame is True, we always start tracking from the frame where we receive the first annotation (clicks or mask) + # and ignore the `start_frame_idx` passed to `propagate_in_video` + always_start_from_first_ann_frame=False, + # the maximum number of points to be used in the prompt encoder, which reduce the domain gap between training (that only has 8 points) + # - if it's set to a positive integer, we only take the `max_point_num_in_prompt_enc//2` points and + # the last `(max_point_num_in_prompt_enc - max_point_num_in_prompt_enc//2)` points in the prompt encoder + # - if it's set to 0 or negative, this option is turned off and we use all points in the prompt encoder + max_point_num_in_prompt_enc=16, + non_overlap_masks_for_output=True, + # checkpoint_file=None, + **kwargs, + ): + super().__init__(**kwargs) + self.clear_non_cond_mem_around_input = clear_non_cond_mem_around_input + self.clear_non_cond_mem_for_multi_obj = clear_non_cond_mem_for_multi_obj + self.fill_hole_area = fill_hole_area + self.always_start_from_first_ann_frame = always_start_from_first_ann_frame + self.max_point_num_in_prompt_enc = max_point_num_in_prompt_enc + self.non_overlap_masks_for_output = non_overlap_masks_for_output + + self.bf16_context = torch.autocast(device_type="cuda", dtype=torch.bfloat16) + self.bf16_context.__enter__() # keep using for the entire model process + + self.iter_use_prev_mask_pred = True + self.add_all_frames_to_correct_as_cond = True + + @torch.inference_mode() + def init_state( + self, + video_height=None, + video_width=None, + num_frames=None, + video_path=None, + cached_features=None, + offload_video_to_cpu=False, + offload_state_to_cpu=False, + async_loading_frames=False, + ): + """Initialize a inference state.""" + inference_state = {} + # whether to offload the video frames to CPU memory + # turning on this option saves the GPU memory with only a very small overhead + inference_state["offload_video_to_cpu"] = offload_video_to_cpu + # whether to offload the inference state to CPU memory + # turning on this option saves the GPU memory at the cost of a lower tracking fps + # (e.g. 
in a test case of 768x768 model, fps dropped from 27 to 24 when tracking one object + # and from 24 to 21 when tracking two objects) + inference_state["offload_state_to_cpu"] = offload_state_to_cpu + inference_state["device"] = self.device + if offload_state_to_cpu: + inference_state["storage_device"] = torch.device("cpu") + else: + inference_state["storage_device"] = torch.device("cuda") + + if video_path is not None: + images, video_height, video_width = load_video_frames( + video_path=video_path, + image_size=self.image_size, + offload_video_to_cpu=offload_video_to_cpu, + async_loading_frames=async_loading_frames, + compute_device=inference_state["storage_device"], + ) + inference_state["images"] = images + inference_state["num_frames"] = len(images) + inference_state["video_height"] = video_height + inference_state["video_width"] = video_width + else: + # the original video height and width, used for resizing final output scores + inference_state["video_height"] = video_height + inference_state["video_width"] = video_width + inference_state["num_frames"] = num_frames + # inputs on each frame + inference_state["point_inputs_per_obj"] = {} + inference_state["mask_inputs_per_obj"] = {} + # visual features on a small number of recently visited frames for quick interactions + inference_state["cached_features"] = ( + {} if cached_features is None else cached_features + ) + # values that don't change across frames (so we only need to hold one copy of them) + inference_state["constants"] = {} + # mapping between client-side object id and model-side object index + inference_state["obj_id_to_idx"] = OrderedDict() + inference_state["obj_idx_to_id"] = OrderedDict() + inference_state["obj_ids"] = [] + # A storage to hold the model's tracking results and states on each frame + inference_state["output_dict"] = { + "cond_frame_outputs": {}, # dict containing {frame_idx: } + "non_cond_frame_outputs": {}, # dict containing {frame_idx: } + } + # The index of the frame that received the first annotation + inference_state["first_ann_frame_idx"] = None + # Slice (view) of each object tracking results, sharing the same memory with "output_dict" + inference_state["output_dict_per_obj"] = {} + # A temporary storage to hold new outputs when user interact with a frame + # to add clicks or mask (it's merged into "output_dict" before propagation starts) + inference_state["temp_output_dict_per_obj"] = {} + # Frames that already holds consolidated outputs from click or mask inputs + # (we directly use their consolidated outputs during tracking) + inference_state["consolidated_frame_inds"] = { + "cond_frame_outputs": set(), # set containing frame indices + "non_cond_frame_outputs": set(), # set containing frame indices + } + # metadata for each tracking frame (e.g. which direction it's tracked) + inference_state["tracking_has_started"] = False + inference_state["frames_already_tracked"] = {} + self.clear_all_points_in_video(inference_state) + return inference_state + + def _obj_id_to_idx(self, inference_state, obj_id): + """Map client-side object id to model-side object index.""" + obj_idx = inference_state["obj_id_to_idx"].get(obj_id, None) + if obj_idx is not None: + return obj_idx + + # This is a new object id not sent to the server before. We only allow adding + # new objects *before* the tracking starts. 
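+        # Illustrative mapping (hypothetical ids, not from the original file): calling this with
+        # obj_id=5, then 7, then 9 before tracking starts assigns obj_idx 0, 1 and 2 respectively;
+        # a later call with obj_id=7 simply returns the existing obj_idx 1 via the lookup above.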
+ allow_new_object = not inference_state["tracking_has_started"] + if allow_new_object: + # get the next object slot + obj_idx = len(inference_state["obj_id_to_idx"]) + inference_state["obj_id_to_idx"][obj_id] = obj_idx + inference_state["obj_idx_to_id"][obj_idx] = obj_id + inference_state["obj_ids"] = list(inference_state["obj_id_to_idx"]) + # set up input and output structures for this object + inference_state["point_inputs_per_obj"][obj_idx] = {} + inference_state["mask_inputs_per_obj"][obj_idx] = {} + inference_state["output_dict_per_obj"][obj_idx] = { + "cond_frame_outputs": {}, # dict containing {frame_idx: } + "non_cond_frame_outputs": {}, # dict containing {frame_idx: } + } + inference_state["temp_output_dict_per_obj"][obj_idx] = { + "cond_frame_outputs": {}, # dict containing {frame_idx: } + "non_cond_frame_outputs": {}, # dict containing {frame_idx: } + } + return obj_idx + else: + raise RuntimeError( + f"Cannot add new object id {obj_id} after tracking starts. " + f"All existing object ids: {inference_state['obj_ids']}." + ) + + def _obj_idx_to_id(self, inference_state, obj_idx): + """Map model-side object index to client-side object id.""" + return inference_state["obj_idx_to_id"][obj_idx] + + def _get_obj_num(self, inference_state): + """Get the total number of unique object ids received so far in this session.""" + return len(inference_state["obj_idx_to_id"]) + + @torch.inference_mode() + def add_new_points_or_box( + self, + inference_state, + frame_idx, + obj_id, + points=None, + labels=None, + clear_old_points=True, + rel_coordinates=True, + use_prev_mem_frame=False, + normalize_coords=True, + box=None, + ): + """Add new points to a frame.""" + obj_idx = self._obj_id_to_idx(inference_state, obj_id) + point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx] + mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx] + + if (points is not None) != (labels is not None): + raise ValueError("points and labels must be provided together") + if points is None and box is None: + raise ValueError("at least one of points or box must be provided as input") + + if points is None: + points = torch.zeros(0, 2, dtype=torch.float32) + elif not isinstance(points, torch.Tensor): + points = torch.tensor(points, dtype=torch.float32) + if labels is None: + labels = torch.zeros(0, dtype=torch.int32) + elif not isinstance(labels, torch.Tensor): + labels = torch.tensor(labels, dtype=torch.int32) + if points.dim() == 2: + points = points.unsqueeze(0) # add batch dimension + if labels.dim() == 1: + labels = labels.unsqueeze(0) # add batch dimension + + if rel_coordinates: + # convert the points from relative coordinates to absolute coordinates + if points is not None: + points = points * self.image_size + if box is not None: + box = box * self.image_size + + # If `box` is provided, we add it as the first two points with labels 2 and 3 + # along with the user-provided points (consistent with how SAM 2 is trained). 
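+        # For example (hypothetical values), an absolute-coordinate box [10, 20, 50, 80] is
+        # reshaped below into the two corner points [[10, 20], [50, 80]] with labels [2, 3],
+        # which are concatenated in front of any user-provided click points.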
+ if box is not None: + if not clear_old_points: + raise ValueError( + "cannot add box without clearing old points, since " + "box prompt must be provided before any point prompt " + "(please use clear_old_points=True instead)" + ) + if not isinstance(box, torch.Tensor): + box = torch.tensor(box, dtype=torch.float32, device=points.device) + box_coords = box.reshape(1, 2, 2) + box_labels = torch.tensor([2, 3], dtype=torch.int32, device=labels.device) + box_labels = box_labels.reshape(1, 2) + points = torch.cat([box_coords, points], dim=1) + labels = torch.cat([box_labels, labels], dim=1) + + points = points.to(inference_state["device"]) + labels = labels.to(inference_state["device"]) + + if not clear_old_points: + point_inputs = point_inputs_per_frame.get(frame_idx, None) + else: + point_inputs = None + point_inputs = concat_points(point_inputs, points, labels) + + point_inputs_per_frame[frame_idx] = point_inputs + mask_inputs_per_frame.pop(frame_idx, None) + # If this frame hasn't been tracked before, we treat it as an initial conditioning + # frame, meaning that the inputs points are to generate segments on this frame without + # using any memory from other frames, like in SAM. Otherwise (if it has been tracked), + # the input points will be used to correct the already tracked masks. + is_init_cond_frame = frame_idx not in inference_state["frames_already_tracked"] + # whether to track in reverse time order + if is_init_cond_frame: + reverse = False + else: + reverse = inference_state["frames_already_tracked"][frame_idx]["reverse"] + obj_output_dict = inference_state["output_dict_per_obj"][obj_idx] + obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx] + # Add a frame to conditioning output if it's an initial conditioning frame or + # if the model sees all frames receiving clicks/mask as conditioning frames. + is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond + storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs" + + # Limit to a maximum number of input points to the prompt encoder (to reduce domain gap) + num_points = point_inputs["point_coords"].size(1) + if num_points > self.max_point_num_in_prompt_enc > 0: + num_first = self.max_point_num_in_prompt_enc // 2 + num_last = self.max_point_num_in_prompt_enc - num_first + point_inputs["point_coords"] = torch.cat( + [ + point_inputs["point_coords"][:, :num_first], + point_inputs["point_coords"][:, -num_last:], + ], + dim=1, + ) + point_inputs["point_labels"] = torch.cat( + [ + point_inputs["point_labels"][:, :num_first], + point_inputs["point_labels"][:, -num_last:], + ], + dim=1, + ) + logging.warning( + f"Too many points ({num_points}) are provided on frame {frame_idx}. Only " + f"the first {num_first} points and the last {num_last} points will be used." + ) + # Get any previously predicted mask logits on this object and feed it along with + # the new clicks into the SAM mask decoder when `self.iter_use_prev_mask_pred=True`. 
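+        # Illustrative correction flow (hypothetical predictor `p`, state `s`, and coordinates):
+        #     p.add_new_points_or_box(s, frame_idx=0, obj_id=1, points=[[0.40, 0.50]], labels=[1])
+        #     # ...inspect the returned mask, then refine it with a negative click:
+        #     p.add_new_points_or_box(s, frame_idx=0, obj_id=1, points=[[0.60, 0.50]], labels=[0],
+        #                             clear_old_points=False)
+        # On the second call, the mask predicted by the first call is looked up below and fed to
+        # the decoder as `prev_sam_mask_logits`.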
+ prev_sam_mask_logits = None + if self.iter_use_prev_mask_pred: + # lookup temporary output dict first, which contains the most recent output + # (if not found, then lookup conditioning and non-conditioning frame output) + prev_out = obj_temp_output_dict[storage_key].get(frame_idx) + if prev_out is None: + prev_out = obj_output_dict["cond_frame_outputs"].get(frame_idx) + if prev_out is None: + prev_out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx) + + if prev_out is not None and prev_out["pred_masks"] is not None: + prev_sam_mask_logits = prev_out["pred_masks"].cuda(non_blocking=True) + # Clamp the scale of prev_sam_mask_logits to avoid rare numerical issues. + prev_sam_mask_logits = torch.clamp(prev_sam_mask_logits, -32.0, 32.0) + current_out, _ = self._run_single_frame_inference( + inference_state=inference_state, + output_dict=obj_output_dict, # run on the slice of a single object + frame_idx=frame_idx, + batch_size=1, # run on the slice of a single object + is_init_cond_frame=is_init_cond_frame, + point_inputs=point_inputs, + mask_inputs=None, + reverse=reverse, + # Skip the memory encoder when adding clicks or mask. We execute the memory encoder + # at the beginning of `propagate_in_video` (after user finalize their clicks). This + # allows us to enforce non-overlapping constraints on all objects before encoding + # them into memory. + run_mem_encoder=False, + prev_sam_mask_logits=prev_sam_mask_logits, + use_prev_mem_frame=use_prev_mem_frame, + ) + # Add the output to the output dict (to be used as future memory) + obj_temp_output_dict[storage_key][frame_idx] = current_out + + # Resize the output mask to the original video resolution + obj_ids = inference_state["obj_ids"] + consolidated_out = self._consolidate_temp_output_across_obj( + inference_state, + frame_idx, + is_cond=is_cond, + run_mem_encoder=False, + consolidate_at_video_res=True, + ) + _, video_res_masks = self._get_orig_video_res_output( + inference_state, consolidated_out["pred_masks_video_res"] + ) + low_res_masks = None # not needed by the demo + return frame_idx, obj_ids, low_res_masks, video_res_masks + + @torch.inference_mode() + def add_new_mask( + self, + inference_state, + frame_idx, + obj_id, + mask, + add_mask_to_memory=False, + ): + """Add new mask to a frame.""" + obj_idx = self._obj_id_to_idx(inference_state, obj_id) + point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx] + mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx] + + assert mask.dim() == 2 + mask_H, mask_W = mask.shape + mask_inputs_orig = mask[None, None] # add batch and channel dimension + mask_inputs_orig = mask_inputs_orig.float().to(inference_state["device"]) + + # resize the mask if it doesn't match the model's input mask size + if mask_H != self.input_mask_size or mask_W != self.input_mask_size: + mask_inputs = torch.nn.functional.interpolate( + mask_inputs_orig, + size=(self.input_mask_size, self.input_mask_size), + align_corners=False, + mode="bilinear", + antialias=True, # use antialias for downsampling + ) + else: + mask_inputs = mask_inputs_orig + + # also get the mask at the original video resolution (for outputting) + video_H = inference_state["video_height"] + video_W = inference_state["video_width"] + if mask_H != video_H or mask_W != video_W: + mask_inputs_video_res = torch.nn.functional.interpolate( + mask_inputs_orig, + size=(video_H, video_W), + align_corners=False, + mode="bilinear", + antialias=True, # use antialias for potential downsampling + ) + else: + 
mask_inputs_video_res = mask_inputs_orig + # convert mask_inputs_video_res to binary (threshold at 0.5 as it is in range 0~1) + mask_inputs_video_res = mask_inputs_video_res > 0.5 + + mask_inputs_per_frame[frame_idx] = mask_inputs_video_res + point_inputs_per_frame.pop(frame_idx, None) + # If this frame hasn't been tracked before, we treat it as an initial conditioning + # frame, meaning that the inputs points are to generate segments on this frame without + # using any memory from other frames, like in SAM. Otherwise (if it has been tracked), + # the input points will be used to correct the already tracked masks. + is_init_cond_frame = frame_idx not in inference_state["frames_already_tracked"] + # whether to track in reverse time order + if is_init_cond_frame: + reverse = False + else: + reverse = inference_state["frames_already_tracked"][frame_idx]["reverse"] + obj_output_dict = inference_state["output_dict_per_obj"][obj_idx] + obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx] + # Add a frame to conditioning output if it's an initial conditioning frame or + # if the model sees all frames receiving clicks/mask as conditioning frames. + is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond + storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs" + + current_out, _ = self._run_single_frame_inference( + inference_state=inference_state, + output_dict=obj_output_dict, # run on the slice of a single object + frame_idx=frame_idx, + batch_size=1, # run on the slice of a single object + is_init_cond_frame=is_init_cond_frame, + point_inputs=None, + mask_inputs=mask_inputs, + reverse=reverse, + # Skip the memory encoder when adding clicks or mask. We execute the memory encoder + # at the beginning of `propagate_in_video` (after user finalize their clicks). This + # allows us to enforce non-overlapping constraints on all objects before encoding + # them into memory. + run_mem_encoder=False, + ) + # We directly use the input mask at video resolution as the output mask for a better + # video editing experience (so that the masks don't change after each brushing). + # Here NO_OBJ_SCORE is a large negative value to represent the background and + # similarly -NO_OBJ_SCORE is a large positive value to represent the foreground. 
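+        # For instance, if NO_OBJ_SCORE were -1024.0 (a hypothetical value), foreground pixels of
+        # the input mask would receive +1024.0 and background pixels -1024.0, so thresholding the
+        # output at 0 reproduces the user's mask exactly.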
+ current_out["pred_masks"] = None + current_out["pred_masks_video_res"] = torch.where( + mask_inputs_video_res, -NO_OBJ_SCORE, NO_OBJ_SCORE + ) + # Add the output to the output dict (to be used as future memory) + obj_temp_output_dict[storage_key][frame_idx] = current_out + # Remove the overlapping proportion of other objects' input masks on this frame + temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"] + for obj_idx2, obj_temp_output_dict2 in temp_output_dict_per_obj.items(): + if obj_idx2 == obj_idx: + continue + current_out2 = obj_temp_output_dict2[storage_key].get(frame_idx, None) + if current_out2 is not None and "pred_masks_video_res" in current_out2: + current_out2["pred_masks_video_res"] = torch.where( + mask_inputs_video_res, + NO_OBJ_SCORE, + current_out2["pred_masks_video_res"], + ) + + # Resize the output mask to the original video resolution + obj_ids = inference_state["obj_ids"] + consolidated_out = self._consolidate_temp_output_across_obj( + inference_state, + frame_idx, + is_cond=is_cond, + run_mem_encoder=False, + consolidate_at_video_res=True, + ) + _, video_res_masks = self._get_orig_video_res_output( + inference_state, consolidated_out["pred_masks_video_res"] + ) + low_res_masks = None # not needed by the demo + return frame_idx, obj_ids, low_res_masks, video_res_masks + + def add_new_points(self, *args, **kwargs): + """Deprecated method. Please use `add_new_points_or_box` instead.""" + return self.add_new_points_or_box(*args, **kwargs) + + def _get_orig_video_res_output(self, inference_state, any_res_masks): + """ + Resize the object scores to the original video resolution (video_res_masks) + and apply non-overlapping constraints for final output. + """ + device = inference_state["device"] + video_H = inference_state["video_height"] + video_W = inference_state["video_width"] + any_res_masks = any_res_masks.to(device, non_blocking=True) + if any_res_masks.shape[-2:] == (video_H, video_W): + video_res_masks = any_res_masks + else: + video_res_masks = torch.nn.functional.interpolate( + any_res_masks, + size=(video_H, video_W), + mode="bilinear", + align_corners=False, + ) + if self.non_overlap_masks_for_output: + video_res_masks = self._apply_non_overlapping_constraints(video_res_masks) + # potentially fill holes in the predicted masks + if self.fill_hole_area > 0: + video_res_masks = fill_holes_in_mask_scores( + video_res_masks, self.fill_hole_area + ) + return any_res_masks, video_res_masks + + def _consolidate_temp_output_across_obj( + self, + inference_state, + frame_idx, + is_cond, + run_mem_encoder, + consolidate_at_video_res=False, + ): + """ + Consolidate the per-object temporary outputs in `temp_output_dict_per_obj` on + a frame into a single output for all objects, including + 1) fill any missing objects either from `output_dict_per_obj` (if they exist in + `output_dict_per_obj` for this frame) or leave them as placeholder values + (if they don't exist in `output_dict_per_obj` for this frame); + 2) if specified, rerun memory encoder after apply non-overlapping constraints + on the object scores. + """ + batch_size = self._get_obj_num(inference_state) + storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs" + # Optionally, we allow consolidating the temporary outputs at the original + # video resolution (to provide a better editing experience for mask prompts). 
+ if consolidate_at_video_res: + assert not run_mem_encoder, "memory encoder cannot run at video resolution" + consolidated_H = inference_state["video_height"] + consolidated_W = inference_state["video_width"] + consolidated_mask_key = "pred_masks_video_res" + else: + consolidated_H = consolidated_W = self.low_res_mask_size + consolidated_mask_key = "pred_masks" + + # Initialize `consolidated_out`. Its "maskmem_features" and "maskmem_pos_enc" + # will be added when rerunning the memory encoder after applying non-overlapping + # constraints to object scores. Its "pred_masks" are prefilled with a large + # negative value (NO_OBJ_SCORE) to represent missing objects. + consolidated_out = { + "maskmem_features": None, + "maskmem_pos_enc": None, + consolidated_mask_key: torch.full( + size=(batch_size, 1, consolidated_H, consolidated_W), + fill_value=NO_OBJ_SCORE, + dtype=torch.float32, + device=inference_state["storage_device"], + ), + "obj_ptr": torch.full( + size=(batch_size, self.hidden_dim), + fill_value=NO_OBJ_SCORE, + dtype=torch.float32, + device=inference_state["device"], + ), + "object_score_logits": torch.full( + size=(batch_size, 1), + # default to 10.0 for object_score_logits, i.e. assuming the object is + # present as sigmoid(10)=1, same as in `predict_masks` of `MaskDecoder` + fill_value=10.0, + dtype=torch.float32, + device=inference_state["device"], + ), + } + if self.use_memory_selection: + consolidated_out["iou_score"] = torch.full( + size=(batch_size, 1), + fill_value=0.0, + dtype=torch.float32, + device=inference_state["device"], + ) + empty_mask_ptr = None + for obj_idx in range(batch_size): + obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx] + obj_output_dict = inference_state["output_dict_per_obj"][obj_idx] + out = obj_temp_output_dict[storage_key].get(frame_idx, None) + # If the object doesn't appear in "temp_output_dict_per_obj" on this frame, + # we fall back and look up its previous output in "output_dict_per_obj". + # We look up both "cond_frame_outputs" and "non_cond_frame_outputs" in + # "output_dict_per_obj" to find a previous output for this object. + if out is None: + out = obj_output_dict["cond_frame_outputs"].get(frame_idx, None) + if out is None: + out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx, None) + # If the object doesn't appear in "output_dict_per_obj" either, we skip it + # and leave its mask scores to the default scores (i.e. the NO_OBJ_SCORE + # placeholder above) and set its object pointer to be a dummy pointer. + if out is None: + # Fill in dummy object pointers for those objects without any inputs or + # tracking outcomes on this frame (only do it under `run_mem_encoder=True`, + # i.e. when we need to build the memory for tracking). 
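+                # The dummy pointer is computed at most once per frame by `_get_empty_mask_ptr`
+                # (which runs the model on an all-zero mask) and reused for every object that has
+                # no output on this frame.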
+ if run_mem_encoder: + if empty_mask_ptr is None: + empty_mask_ptr = self._get_empty_mask_ptr( + inference_state, frame_idx + ) + # fill object pointer with a dummy pointer (based on an empty mask) + consolidated_out["obj_ptr"][obj_idx : obj_idx + 1] = empty_mask_ptr + continue + # Add the temporary object output mask to consolidated output mask + # (use "pred_masks_video_res" if it's available) + obj_mask = out.get("pred_masks_video_res", out["pred_masks"]) + consolidated_pred_masks = consolidated_out[consolidated_mask_key] + if obj_mask.shape[-2:] == consolidated_pred_masks.shape[-2:]: + consolidated_pred_masks[obj_idx : obj_idx + 1] = obj_mask + else: + # Resize first if temporary object mask has a different resolution + is_downsampling = "pred_masks_video_res" in out + resized_obj_mask = torch.nn.functional.interpolate( + obj_mask, + size=consolidated_pred_masks.shape[-2:], + mode="bilinear", + align_corners=False, + antialias=is_downsampling, # use antialias for downsampling + ) + consolidated_pred_masks[obj_idx : obj_idx + 1] = resized_obj_mask + consolidated_out["obj_ptr"][obj_idx : obj_idx + 1] = out["obj_ptr"] + consolidated_out["object_score_logits"][obj_idx : obj_idx + 1] = out[ + "object_score_logits" + ] + if self.use_memory_selection: + consolidated_out["iou_score"][obj_idx : obj_idx + 1] = out["iou_score"] + # Optionally, apply non-overlapping constraints on the consolidated scores + # and rerun the memory encoder + if run_mem_encoder: + device = inference_state["device"] + high_res_masks = torch.nn.functional.interpolate( + consolidated_out["pred_masks"].to(device, non_blocking=True), + size=(self.image_size, self.image_size), + mode="bilinear", + align_corners=False, + ) + high_res_masks = self._apply_non_overlapping_constraints(high_res_masks) + maskmem_features, maskmem_pos_enc = self._run_memory_encoder( + inference_state=inference_state, + frame_idx=frame_idx, + batch_size=batch_size, + high_res_masks=high_res_masks, + object_score_logits=consolidated_out["object_score_logits"], + is_mask_from_pts=True, # these frames are what the user interacted with + ) + consolidated_out["maskmem_features"] = maskmem_features + consolidated_out["maskmem_pos_enc"] = maskmem_pos_enc + + return consolidated_out + + def _get_empty_mask_ptr(self, inference_state, frame_idx): + """Get a dummy object pointer based on an empty mask on the current frame.""" + # A dummy (empty) mask with a single object + batch_size = 1 + mask_inputs = torch.zeros( + (batch_size, 1, self.image_size, self.image_size), + dtype=torch.float32, + device=inference_state["device"], + ) + + # Retrieve correct image features + ( + image, + _, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + ) = self._get_image_feature(inference_state, frame_idx, batch_size) + + # Feed the empty mask and image feature above to get a dummy object pointer + current_out = self.track_step( + frame_idx=frame_idx, + is_init_cond_frame=True, + current_vision_feats=current_vision_feats, + current_vision_pos_embeds=current_vision_pos_embeds, + feat_sizes=feat_sizes, + image=image, + point_inputs=None, + mask_inputs=mask_inputs, + gt_masks=None, + frames_to_add_correction_pt=[], + output_dict={ + "cond_frame_outputs": {}, + "non_cond_frame_outputs": {}, + }, + num_frames=inference_state["num_frames"], + track_in_reverse=False, + run_mem_encoder=False, + prev_sam_mask_logits=None, + ) + return current_out["obj_ptr"] + + @torch.inference_mode() + def propagate_in_video_preflight(self, inference_state, run_mem_encoder=True): + 
"""Prepare inference_state and consolidate temporary outputs before tracking.""" + # Tracking has started and we don't allow adding new objects until session is reset. + inference_state["tracking_has_started"] = True + batch_size = self._get_obj_num(inference_state) + + # Consolidate per-object temporary outputs in "temp_output_dict_per_obj" and + # add them into "output_dict". + temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"] + output_dict = inference_state["output_dict"] + # "consolidated_frame_inds" contains indices of those frames where consolidated + # temporary outputs have been added (either in this call or any previous calls + # to `propagate_in_video_preflight`). + consolidated_frame_inds = inference_state["consolidated_frame_inds"] + for is_cond in [False, True]: + # Separately consolidate conditioning and non-conditioning temp outptus + storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs" + # Find all the frames that contain temporary outputs for any objects + # (these should be the frames that have just received clicks for mask inputs + # via `add_new_points` or `add_new_mask`) + temp_frame_inds = set() + for obj_temp_output_dict in temp_output_dict_per_obj.values(): + temp_frame_inds.update(obj_temp_output_dict[storage_key].keys()) + consolidated_frame_inds[storage_key].update(temp_frame_inds) + # consolidate the temprary output across all objects on this frame + for frame_idx in temp_frame_inds: + consolidated_out = self._consolidate_temp_output_across_obj( + inference_state, + frame_idx, + is_cond=is_cond, + run_mem_encoder=run_mem_encoder, + ) + # merge them into "output_dict" and also create per-object slices + output_dict[storage_key][frame_idx] = consolidated_out + self._add_output_per_object( + inference_state, frame_idx, consolidated_out, storage_key + ) + clear_non_cond_mem = self.clear_non_cond_mem_around_input and ( + self.clear_non_cond_mem_for_multi_obj or batch_size <= 1 + ) + if clear_non_cond_mem: + # clear non-conditioning memory of the surrounding frames + self._clear_non_cond_mem_around_input(inference_state, frame_idx) + + # clear temporary outputs in `temp_output_dict_per_obj` + for obj_temp_output_dict in temp_output_dict_per_obj.values(): + obj_temp_output_dict[storage_key].clear() + + # edge case: if an output is added to "cond_frame_outputs", we remove any prior + # output on the same frame in "non_cond_frame_outputs" + for frame_idx in output_dict["cond_frame_outputs"]: + output_dict["non_cond_frame_outputs"].pop(frame_idx, None) + for obj_output_dict in inference_state["output_dict_per_obj"].values(): + for frame_idx in obj_output_dict["cond_frame_outputs"]: + obj_output_dict["non_cond_frame_outputs"].pop(frame_idx, None) + for frame_idx in consolidated_frame_inds["cond_frame_outputs"]: + assert frame_idx in output_dict["cond_frame_outputs"] + consolidated_frame_inds["non_cond_frame_outputs"].discard(frame_idx) + + # Make sure that the frame indices in "consolidated_frame_inds" are exactly those frames + # with either points or mask inputs (which should be true under a correct demo workflow). 
+ all_consolidated_frame_inds = ( + consolidated_frame_inds["cond_frame_outputs"] + | consolidated_frame_inds["non_cond_frame_outputs"] + ) + input_frames_inds = set() + for point_inputs_per_frame in inference_state["point_inputs_per_obj"].values(): + input_frames_inds.update(point_inputs_per_frame.keys()) + for mask_inputs_per_frame in inference_state["mask_inputs_per_obj"].values(): + input_frames_inds.update(mask_inputs_per_frame.keys()) + assert all_consolidated_frame_inds == input_frames_inds + # Record the first interacted frame index (for tracking start) + if inference_state["first_ann_frame_idx"] is None: + inference_state["first_ann_frame_idx"] = min( + input_frames_inds, default=None + ) + # In case `first_ann_frame_idx` is not in the conditioning frames (e.g. because + # we cleared the input points on that frame), pick the first conditioning frame + if ( + inference_state["first_ann_frame_idx"] + not in output_dict["cond_frame_outputs"] + ): + inference_state["first_ann_frame_idx"] = min( + output_dict["cond_frame_outputs"], default=None + ) + + def _get_processing_order( + self, inference_state, start_frame_idx, max_frame_num_to_track, reverse + ): + num_frames = inference_state["num_frames"] + # set start index, end index, and processing order + if self.always_start_from_first_ann_frame: + # in this case, we always start tracking from the frame where we receive + # the initial annotation and ignore the provided start_frame_idx + start_frame_idx = inference_state["first_ann_frame_idx"] + if start_frame_idx is None: + # default: start from the earliest frame with input points + start_frame_idx = min(inference_state["output_dict"]["cond_frame_outputs"]) + if max_frame_num_to_track is None: + # default: track all the frames in the video + max_frame_num_to_track = num_frames + if reverse: + end_frame_idx = max(start_frame_idx - max_frame_num_to_track, 0) + if start_frame_idx > 0: + processing_order = range(start_frame_idx, end_frame_idx - 1, -1) + else: + # this is the edge case where we start from frame 0 and track in reverse order; + # in this case, we track a single frame (frame 0) + processing_order = [0] + else: + end_frame_idx = min( + start_frame_idx + max_frame_num_to_track, num_frames - 1 + ) + processing_order = range(start_frame_idx, end_frame_idx + 1) + return processing_order + + @torch.inference_mode() + def propagate_in_video( + self, + inference_state, + start_frame_idx, + max_frame_num_to_track, + reverse, + tqdm_disable=False, + obj_ids=None, + run_mem_encoder=True, + propagate_preflight=False, + ): + """Propagate the input points across frames to track in the entire video.""" + if propagate_preflight: + self.propagate_in_video_preflight(inference_state) + # NOTE: This is a copy from the parent class, except that we return object scores as well. + output_dict = inference_state["output_dict"] + consolidated_frame_inds = inference_state["consolidated_frame_inds"] + if obj_ids is not None: + raise NotImplementedError( + "Per-object tracking yet for batched inference if not implemented." 
+ ) + obj_ids = inference_state["obj_ids"] + batch_size = self._get_obj_num(inference_state) + if len(output_dict["cond_frame_outputs"]) == 0: + raise RuntimeError("No points are provided; please add points first") + clear_non_cond_mem = self.clear_non_cond_mem_around_input and ( + self.clear_non_cond_mem_for_multi_obj or batch_size <= 1 + ) + + processing_order = self._get_processing_order( + inference_state, + start_frame_idx, + max_frame_num_to_track, + reverse, + ) + + for frame_idx in tqdm( + processing_order, desc="propagate in video", disable=tqdm_disable + ): + # We skip those frames already in consolidated outputs (these are frames + # that received input clicks or mask). Note that we cannot directly run + # batched forward on them via `_run_single_frame_inference` because the + # number of clicks on each object might be different. + if frame_idx in consolidated_frame_inds["cond_frame_outputs"]: + storage_key = "cond_frame_outputs" + current_out = output_dict[storage_key][frame_idx] + pred_masks = current_out["pred_masks"] + obj_scores = current_out["object_score_logits"] + if clear_non_cond_mem: + # clear non-conditioning memory of the surrounding frames + self._clear_non_cond_mem_around_input(inference_state, frame_idx) + elif frame_idx in consolidated_frame_inds["non_cond_frame_outputs"]: + storage_key = "non_cond_frame_outputs" + current_out = output_dict[storage_key][frame_idx] + pred_masks = current_out["pred_masks"] + obj_scores = current_out["object_score_logits"] + else: + storage_key = "non_cond_frame_outputs" + current_out, pred_masks = self._run_single_frame_inference( + inference_state=inference_state, + output_dict=output_dict, + frame_idx=frame_idx, + batch_size=batch_size, + is_init_cond_frame=False, + point_inputs=None, + mask_inputs=None, + reverse=reverse, + run_mem_encoder=run_mem_encoder, + ) + obj_scores = current_out["object_score_logits"] + output_dict[storage_key][frame_idx] = current_out + # Create slices of per-object outputs for subsequent interaction with each + # individual object after tracking. + self._add_output_per_object( + inference_state, frame_idx, current_out, storage_key + ) + inference_state["frames_already_tracked"][frame_idx] = {"reverse": reverse} + + # Resize the output mask to the original video resolution (we directly use + # the mask scores on GPU for output to avoid any CPU conversion in between) + low_res_masks, video_res_masks = self._get_orig_video_res_output( + inference_state, pred_masks + ) + yield frame_idx, obj_ids, low_res_masks, video_res_masks, obj_scores + + def _add_output_per_object( + self, inference_state, frame_idx, current_out, storage_key + ): + """ + Split a multi-object output into per-object output slices and add them into + `output_dict_per_obj`. The resulting slices share the same tensor storage. 
+ """ + maskmem_features = current_out["maskmem_features"] + assert maskmem_features is None or isinstance(maskmem_features, torch.Tensor) + + maskmem_pos_enc = current_out["maskmem_pos_enc"] + assert maskmem_pos_enc is None or isinstance(maskmem_pos_enc, list) + + output_dict_per_obj = inference_state["output_dict_per_obj"] + for obj_idx, obj_output_dict in output_dict_per_obj.items(): + obj_slice = slice(obj_idx, obj_idx + 1) + obj_out = { + "maskmem_features": None, + "maskmem_pos_enc": None, + "pred_masks": current_out["pred_masks"][obj_slice], + "obj_ptr": current_out["obj_ptr"][obj_slice], + "object_score_logits": current_out["object_score_logits"][obj_slice], + } + if self.use_memory_selection: + obj_out["iou_score"] = current_out["iou_score"][obj_slice] + if maskmem_features is not None: + obj_out["maskmem_features"] = maskmem_features[obj_slice] + if maskmem_pos_enc is not None: + obj_out["maskmem_pos_enc"] = [x[obj_slice] for x in maskmem_pos_enc] + obj_output_dict[storage_key][frame_idx] = obj_out + + @torch.inference_mode() + def clear_all_points_in_frame( + self, inference_state, frame_idx, obj_id, need_output=True + ): + """Remove all input points or mask in a specific frame for a given object.""" + obj_idx = self._obj_id_to_idx(inference_state, obj_id) + + # Clear the conditioning information on the given frame + inference_state["point_inputs_per_obj"][obj_idx].pop(frame_idx, None) + inference_state["mask_inputs_per_obj"][obj_idx].pop(frame_idx, None) + + temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"] + temp_output_dict_per_obj[obj_idx]["cond_frame_outputs"].pop(frame_idx, None) + temp_output_dict_per_obj[obj_idx]["non_cond_frame_outputs"].pop(frame_idx, None) + + # Check and see if there are still any inputs left on this frame + batch_size = self._get_obj_num(inference_state) + frame_has_input = False + for obj_idx2 in range(batch_size): + if frame_idx in inference_state["point_inputs_per_obj"][obj_idx2]: + frame_has_input = True + break + if frame_idx in inference_state["mask_inputs_per_obj"][obj_idx2]: + frame_has_input = True + break + + # If this frame has no remaining inputs for any objects, we further clear its + # conditioning frame status + if not frame_has_input: + output_dict = inference_state["output_dict"] + consolidated_frame_inds = inference_state["consolidated_frame_inds"] + consolidated_frame_inds["cond_frame_outputs"].discard(frame_idx) + consolidated_frame_inds["non_cond_frame_outputs"].discard(frame_idx) + # Remove the frame's conditioning output (possibly downgrading it to non-conditioning) + out = output_dict["cond_frame_outputs"].pop(frame_idx, None) + if out is not None: + # The frame is not a conditioning frame anymore since it's not receiving inputs, + # so we "downgrade" its output (if exists) to a non-conditioning frame output. + output_dict["non_cond_frame_outputs"][frame_idx] = out + inference_state["frames_already_tracked"].pop(frame_idx, None) + # Similarly, do it for the sliced output on each object. 
+ for obj_idx2 in range(batch_size): + obj_output_dict = inference_state["output_dict_per_obj"][obj_idx2] + obj_out = obj_output_dict["cond_frame_outputs"].pop(frame_idx, None) + if obj_out is not None: + obj_output_dict["non_cond_frame_outputs"][frame_idx] = obj_out + + # If all the conditioning frames have been removed, we also clear the tracking outputs + if len(output_dict["cond_frame_outputs"]) == 0: + self._reset_tracking_results(inference_state) + + if not need_output: + return + # Finally, output updated masks per object (after removing the inputs above) + obj_ids = inference_state["obj_ids"] + is_cond = any( + frame_idx in obj_temp_output_dict["cond_frame_outputs"] + for obj_temp_output_dict in temp_output_dict_per_obj.values() + ) + consolidated_out = self._consolidate_temp_output_across_obj( + inference_state, + frame_idx, + is_cond=is_cond, + run_mem_encoder=False, + consolidate_at_video_res=True, + ) + _, video_res_masks = self._get_orig_video_res_output( + inference_state, consolidated_out["pred_masks_video_res"] + ) + low_res_masks = None # not needed by the demo + return frame_idx, obj_ids, low_res_masks, video_res_masks + + @torch.inference_mode() + def clear_all_points_in_video(self, inference_state): + """Remove all input points or mask in all frames throughout the video.""" + self._reset_tracking_results(inference_state) + # Remove all object ids + inference_state["obj_id_to_idx"].clear() + inference_state["obj_idx_to_id"].clear() + inference_state["obj_ids"].clear() + inference_state["point_inputs_per_obj"].clear() + inference_state["mask_inputs_per_obj"].clear() + inference_state["output_dict_per_obj"].clear() + inference_state["temp_output_dict_per_obj"].clear() + + def _reset_tracking_results(self, inference_state): + """Reset all tracking inputs and results across the videos.""" + for v in inference_state["point_inputs_per_obj"].values(): + v.clear() + for v in inference_state["mask_inputs_per_obj"].values(): + v.clear() + for v in inference_state["output_dict_per_obj"].values(): + v["cond_frame_outputs"].clear() + v["non_cond_frame_outputs"].clear() + for v in inference_state["temp_output_dict_per_obj"].values(): + v["cond_frame_outputs"].clear() + v["non_cond_frame_outputs"].clear() + inference_state["output_dict"]["cond_frame_outputs"].clear() + inference_state["output_dict"]["non_cond_frame_outputs"].clear() + inference_state["consolidated_frame_inds"]["cond_frame_outputs"].clear() + inference_state["consolidated_frame_inds"]["non_cond_frame_outputs"].clear() + inference_state["tracking_has_started"] = False + inference_state["frames_already_tracked"].clear() + inference_state["first_ann_frame_idx"] = None + + def _get_image_feature(self, inference_state, frame_idx, batch_size): + """Compute the image features on a given frame.""" + # Look up in the cache + image, backbone_out = inference_state["cached_features"].get( + frame_idx, (None, None) + ) + if backbone_out is None: + if self.backbone is None: + raise RuntimeError( + f"Image features for frame {frame_idx} are not cached. " + "Please run inference on this frame first." + ) + else: + # Cache miss -- we will run inference on a single image + image = inference_state["images"][frame_idx].cuda().float().unsqueeze(0) + backbone_out = self.forward_image(image) + # Cache the most recent frame's feature (for repeated interactions with + # a frame; we can use an LRU cache for more frames in the future). 
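+                # The cache deliberately holds a single entry, e.g. {42: (image, backbone_out)}
+                # (hypothetical frame index); repeated interactions with frame 42 reuse it, while
+                # moving to another frame recomputes and replaces the entry.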
+ inference_state["cached_features"] = {frame_idx: (image, backbone_out)} + if "tracker_backbone_out" in backbone_out: + backbone_out = backbone_out["tracker_backbone_out"] # get backbone output + + # expand the features to have the same dimension as the number of objects + expanded_image = image.expand(batch_size, -1, -1, -1) + expanded_backbone_out = { + "backbone_fpn": backbone_out["backbone_fpn"].copy(), + "vision_pos_enc": backbone_out["vision_pos_enc"].copy(), + } + for i, feat in enumerate(expanded_backbone_out["backbone_fpn"]): + feat = feat.expand(batch_size, -1, -1, -1) + expanded_backbone_out["backbone_fpn"][i] = feat + for i, pos in enumerate(expanded_backbone_out["vision_pos_enc"]): + pos = pos.expand(batch_size, -1, -1, -1) + expanded_backbone_out["vision_pos_enc"][i] = pos + + features = self._prepare_backbone_features(expanded_backbone_out) + features = (expanded_image,) + features + return features + + def _run_single_frame_inference( + self, + inference_state, + output_dict, + frame_idx, + batch_size, + is_init_cond_frame, + point_inputs, + mask_inputs, + reverse, + run_mem_encoder, + prev_sam_mask_logits=None, + use_prev_mem_frame=True, + ): + """Run tracking on a single frame based on current inputs and previous memory.""" + # Retrieve correct image features + ( + image, + _, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + ) = self._get_image_feature(inference_state, frame_idx, batch_size) + + # point and mask should not appear as input simultaneously on the same frame + assert point_inputs is None or mask_inputs is None + current_out = self.track_step( + frame_idx=frame_idx, + is_init_cond_frame=is_init_cond_frame, + current_vision_feats=current_vision_feats, + current_vision_pos_embeds=current_vision_pos_embeds, + feat_sizes=feat_sizes, + image=image, + point_inputs=point_inputs, + mask_inputs=mask_inputs, + output_dict=output_dict, + num_frames=inference_state["num_frames"], + track_in_reverse=reverse, + run_mem_encoder=run_mem_encoder, + prev_sam_mask_logits=prev_sam_mask_logits, + use_prev_mem_frame=use_prev_mem_frame, + ) + + # optionally offload the output to CPU memory to save GPU space + storage_device = inference_state["storage_device"] + maskmem_features = current_out["maskmem_features"] + if maskmem_features is not None: + maskmem_features = maskmem_features.to(torch.bfloat16) + maskmem_features = maskmem_features.to(storage_device, non_blocking=True) + pred_masks_gpu = current_out["pred_masks"] + pred_masks = pred_masks_gpu.to(storage_device, non_blocking=True) + # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it + maskmem_pos_enc = self._get_maskmem_pos_enc(inference_state, current_out) + # object pointer is a small tensor, so we always keep it on GPU memory for fast access + obj_ptr = current_out["obj_ptr"] + object_score_logits = current_out["object_score_logits"] + # make a compact version of this frame's output to reduce the state size + compact_current_out = { + "maskmem_features": maskmem_features, + "maskmem_pos_enc": maskmem_pos_enc, + "pred_masks": pred_masks, + "obj_ptr": obj_ptr, + "object_score_logits": object_score_logits, + } + if self.use_memory_selection: + compact_current_out["iou_score"] = current_out["iou_score"] + compact_current_out["eff_iou_score"] = current_out["eff_iou_score"] + return compact_current_out, pred_masks_gpu + + def _run_memory_encoder( + self, + inference_state, + frame_idx, + batch_size, + high_res_masks, + object_score_logits, + is_mask_from_pts, + ): + """ + 
Run the memory encoder on `high_res_masks`. This is usually after applying + non-overlapping constraints to object scores. Since their scores changed, their + memory also need to be computed again with the memory encoder. + """ + # Retrieve correct image features + image, _, current_vision_feats, _, feat_sizes = self._get_image_feature( + inference_state, frame_idx, batch_size + ) + maskmem_features, maskmem_pos_enc = self._encode_new_memory( + image=image, + current_vision_feats=current_vision_feats, + feat_sizes=feat_sizes, + pred_masks_high_res=high_res_masks, + object_score_logits=object_score_logits, + is_mask_from_pts=is_mask_from_pts, + ) + + # optionally offload the output to CPU memory to save GPU space + storage_device = inference_state["storage_device"] + maskmem_features = maskmem_features.to(torch.bfloat16) + maskmem_features = maskmem_features.to(storage_device, non_blocking=True) + # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it + maskmem_pos_enc = self._get_maskmem_pos_enc( + inference_state, {"maskmem_pos_enc": maskmem_pos_enc} + ) + return maskmem_features, maskmem_pos_enc + + def _get_maskmem_pos_enc(self, inference_state, current_out): + """ + `maskmem_pos_enc` is the same across frames and objects, so we cache it as + a constant in the inference session to reduce session storage size. + """ + model_constants = inference_state["constants"] + # "out_maskmem_pos_enc" should be either a list of tensors or None + out_maskmem_pos_enc = current_out["maskmem_pos_enc"] + if out_maskmem_pos_enc is not None: + if "maskmem_pos_enc" not in model_constants: + assert isinstance(out_maskmem_pos_enc, list) + # only take the slice for one object, since it's same across objects + maskmem_pos_enc = [x[0:1].clone() for x in out_maskmem_pos_enc] + model_constants["maskmem_pos_enc"] = maskmem_pos_enc + else: + maskmem_pos_enc = model_constants["maskmem_pos_enc"] + # expand the cached maskmem_pos_enc to the actual batch size + batch_size = out_maskmem_pos_enc[0].size(0) + expanded_maskmem_pos_enc = [ + x.expand(batch_size, -1, -1, -1) for x in maskmem_pos_enc + ] + else: + expanded_maskmem_pos_enc = None + return expanded_maskmem_pos_enc + + @torch.inference_mode() + def remove_object(self, inference_state, obj_id, strict=False, need_output=True): + """ + Remove an object id from the tracking state. If strict is True, we check whether + the object id actually exists and raise an error if it doesn't exist. + """ + old_obj_idx_to_rm = inference_state["obj_id_to_idx"].get(obj_id, None) + updated_frames = [] + # Check whether this object_id to remove actually exists and possibly raise an error. + if old_obj_idx_to_rm is None: + if not strict: + return inference_state["obj_ids"], updated_frames + raise RuntimeError( + f"Cannot remove object id {obj_id} as it doesn't exist. " + f"All existing object ids: {inference_state['obj_ids']}." + ) + + # If this is the only remaining object id, we simply reset the state. + if len(inference_state["obj_id_to_idx"]) == 1: + self.clear_all_points_in_video(inference_state) + return inference_state["obj_ids"], updated_frames + + # There are still remaining objects after removing this object id. In this case, + # we need to delete the object storage from inference state tensors. 
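+        # Illustrative example (hypothetical ids): with obj_ids [5, 7, 9] stored at indices
+        # [0, 1, 2], removing obj_id 7 keeps remain_old_obj_inds = [0, 2]; ids [5, 9] are then
+        # remapped to new indices [0, 1] and the per-object/packed storage is re-sliced below.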
+ # Step 0: clear the input on those frames where this object id has point or mask input + # (note that this step is required as it might downgrade conditioning frames to + # non-conditioning ones) + obj_input_frames_inds = set() + obj_input_frames_inds.update( + inference_state["point_inputs_per_obj"][old_obj_idx_to_rm] + ) + obj_input_frames_inds.update( + inference_state["mask_inputs_per_obj"][old_obj_idx_to_rm] + ) + for frame_idx in obj_input_frames_inds: + self.clear_all_points_in_frame( + inference_state, frame_idx, obj_id, need_output=False + ) + + # Step 1: Update the object id mapping (note that it must be done after Step 0, + # since Step 0 still requires the old object id mappings in inference_state) + old_obj_ids = inference_state["obj_ids"] + old_obj_inds = list(range(len(old_obj_ids))) + remain_old_obj_inds = old_obj_inds.copy() + remain_old_obj_inds.remove(old_obj_idx_to_rm) + new_obj_ids = [old_obj_ids[old_idx] for old_idx in remain_old_obj_inds] + new_obj_inds = list(range(len(new_obj_ids))) + # build new mappings + old_idx_to_new_idx = dict(zip(remain_old_obj_inds, new_obj_inds)) + inference_state["obj_id_to_idx"] = dict(zip(new_obj_ids, new_obj_inds)) + inference_state["obj_idx_to_id"] = dict(zip(new_obj_inds, new_obj_ids)) + inference_state["obj_ids"] = new_obj_ids + + # Step 2: For per-object tensor storage, we shift their obj_idx in the dict keys. + # (note that "consolidated_frame_inds" doesn't need to be updated in this step as + # it's already handled in Step 0) + def _map_keys(container): + new_kvs = [] + for k in old_obj_inds: + v = container.pop(k) + if k in old_idx_to_new_idx: + new_kvs.append((old_idx_to_new_idx[k], v)) + container.update(new_kvs) + + _map_keys(inference_state["point_inputs_per_obj"]) + _map_keys(inference_state["mask_inputs_per_obj"]) + _map_keys(inference_state["output_dict_per_obj"]) + _map_keys(inference_state["temp_output_dict_per_obj"]) + + # Step 3: For packed tensor storage, we index the remaining ids and rebuild the per-object slices. 
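+        # e.g. a packed "pred_masks" tensor of shape (3, 1, H, W) indexed with
+        # remain_old_obj_inds = [0, 2] becomes (2, 1, H, W); the same indexing is applied to the
+        # memory features, object pointers and score tensors below.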
+ def _slice_state(output_dict, storage_key): + for frame_idx, out in output_dict[storage_key].items(): + out["maskmem_features"] = out["maskmem_features"][remain_old_obj_inds] + out["maskmem_pos_enc"] = [ + x[remain_old_obj_inds] for x in out["maskmem_pos_enc"] + ] + # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it + out["maskmem_pos_enc"] = self._get_maskmem_pos_enc(inference_state, out) + out["pred_masks"] = out["pred_masks"][remain_old_obj_inds] + out["obj_ptr"] = out["obj_ptr"][remain_old_obj_inds] + out["object_score_logits"] = out["object_score_logits"][ + remain_old_obj_inds + ] + if self.use_memory_selection: + out["iou_score"] = out["iou_score"][remain_old_obj_inds] + out["eff_iou_score"] = self.cal_mem_score( + out["object_score_logits"], out["iou_score"] + ) # recalculate the memory frame score + # also update the per-object slices + self._add_output_per_object( + inference_state, frame_idx, out, storage_key + ) + + _slice_state(inference_state["output_dict"], "cond_frame_outputs") + _slice_state(inference_state["output_dict"], "non_cond_frame_outputs") + + # Step 4: Further collect the outputs on those frames in `obj_input_frames_inds`, which + # could show an updated mask for objects previously occluded by the object being removed + if need_output: + temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"] + for frame_idx in obj_input_frames_inds: + is_cond = any( + frame_idx in obj_temp_output_dict["cond_frame_outputs"] + for obj_temp_output_dict in temp_output_dict_per_obj.values() + ) + consolidated_out = self._consolidate_temp_output_across_obj( + inference_state, + frame_idx, + is_cond=is_cond, + run_mem_encoder=False, + consolidate_at_video_res=True, + ) + _, video_res_masks = self._get_orig_video_res_output( + inference_state, consolidated_out["pred_masks_video_res"] + ) + updated_frames.append((frame_idx, video_res_masks)) + + return inference_state["obj_ids"], updated_frames + + def _clear_non_cond_mem_around_input(self, inference_state, frame_idx): + """ + Remove the non-conditioning memory around the input frame. When users provide + correction clicks, the surrounding frames' non-conditioning memories can still + contain outdated object appearance information and could confuse the model. + + This method clears those non-conditioning memories surrounding the interacted + frame to avoid giving the model both old and new information about the object. 
+ """ + r = self.memory_temporal_stride_for_eval + frame_idx_begin = frame_idx - r * self.num_maskmem + frame_idx_end = frame_idx + r * self.num_maskmem + batch_size = self._get_obj_num(inference_state) + for obj_idx in range(batch_size): + obj_output_dict = inference_state["output_dict_per_obj"][obj_idx] + non_cond_frame_outputs = obj_output_dict["non_cond_frame_outputs"] + for t in range(frame_idx_begin, frame_idx_end + 1): + non_cond_frame_outputs.pop(t, None) + + def _suppress_shrinked_masks( + self, pred_masks, new_pred_masks, shrink_threshold=0.3 + ): + area_before = (pred_masks > 0).sum(dim=(-1, -2)) + area_after = (new_pred_masks > 0).sum(dim=(-1, -2)) + area_before = torch.clamp(area_before, min=1.0) + area_ratio = area_after / area_before + keep = area_ratio >= shrink_threshold + keep_mask = keep[..., None, None].expand_as(pred_masks) + pred_masks_after = torch.where( + keep_mask, pred_masks, torch.clamp(pred_masks, max=-10.0) + ) + return pred_masks_after + + def _suppress_object_pw_area_shrinkage(self, pred_masks): + """ + This function suppresses masks that shrink in area after applying pixelwise non-overlapping constriants. + Note that the final output can still be overlapping. + """ + # Apply pixel-wise non-overlapping constraint based on mask scores + pixel_level_non_overlapping_masks = super()._apply_non_overlapping_constraints( + pred_masks + ) + # Fully suppress masks with high shrinkage (probably noisy) based on the pixel wise non-overlapping constraints + # NOTE: The output of this function can be a no op if none of the masks shrinked by a large factor. + pred_masks = self._suppress_shrinked_masks( + pred_masks, pixel_level_non_overlapping_masks + ) + return pred_masks + + def _apply_object_wise_non_overlapping_constraints( + self, pred_masks, obj_scores, background_value=-10.0 + ): + """ + Applies non-overlapping constraints object wise (i.e. only one object can claim the overlapping region) + """ + # Replace pixel scores with object scores + pred_masks_single_score = torch.where( + pred_masks > 0, obj_scores[..., None, None], background_value + ) + # Apply pixel-wise non-overlapping constraint based on mask scores + pixel_level_non_overlapping_masks = super()._apply_non_overlapping_constraints( + pred_masks_single_score + ) + # Replace object scores with pixel scores. Note, that now only one object can claim the overlapping region + pred_masks = torch.where( + pixel_level_non_overlapping_masks > 0, + pred_masks, + torch.clamp(pred_masks, max=background_value), + ) + return pred_masks diff --git a/detect_tools/sam3/sam3/model/sam3_video_base.py b/detect_tools/sam3/sam3/model/sam3_video_base.py new file mode 100644 index 0000000000000000000000000000000000000000..e61969f6d581101bb49e8eeb604a0d38d88da5bb --- /dev/null +++ b/detect_tools/sam3/sam3/model/sam3_video_base.py @@ -0,0 +1,1767 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
All Rights Reserved + +import datetime +import logging +import math +import os +from collections import defaultdict +from copy import deepcopy +from enum import Enum +from typing import Any, Dict, List, Set + +import numpy as np +import numpy.typing as npt +import torch +import torch.distributed as dist +import torch.nn.functional as F + +from sam3 import perflib +from sam3.logger import get_logger +from sam3.model.box_ops import fast_diag_box_iou +from sam3.model.data_misc import BatchedDatapoint +from sam3.model.sam3_tracker_utils import fill_holes_in_mask_scores, mask_to_box +from sam3.perflib.masks_ops import mask_iou +from sam3.train.masks_ops import rle_encode +from torch import nn, Tensor + +logger = get_logger(__name__) + + +class MaskletConfirmationStatus(Enum): + UNCONFIRMED = 1 # newly added masklet, not confirmed by any detection yet + CONFIRMED = 2 # confirmed by at least one detection + + +class Sam3VideoBase(nn.Module): + def __init__( + self, + detector: nn.Module, + tracker: nn.Module, + # prob threshold for detection outputs -- only keep detections above this threshold + # enters NMS and det-to-track matching + score_threshold_detection=0.5, + # IoU threshold for detection NMS + det_nms_thresh=0.0, + # IoU threshold for det-to-track matching -- a detection is considered "matched" to a tracklet it + # overlaps with a tracklet above this threshold -- it is often a loose threshold like 0.1 + assoc_iou_thresh=0.5, + # IoU threshold for det-to-track matching, which is used to determine whether a masklet is "unmatched" + # by any detections -- it is often a stricter threshold like 0.5 + trk_assoc_iou_thresh=0.5, + # prob threshold for a detection to be added as a new object + new_det_thresh=0.0, + # hotstart parameters: we hold off the outputs for `hotstart_delay` frames and + # 1) remove those tracklets unmatched by any detections based on `hotstart_unmatch_thresh` + # 2) remove those tracklets overlapping with one another based on `hotstart_dup_thresh` + hotstart_delay=0, + hotstart_unmatch_thresh=3, + hotstart_dup_thresh=3, + # Whether to suppress masks only within hotstart. If False, we can suppress masks even if they start before hotstart period. 
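+        # (Hypothetical numbers: with hotstart_delay=8, outputs are held back for 8 frames while
+        # unmatched or duplicated masklets are pruned per the thresholds above; this flag decides
+        # whether that pruning is restricted to masklets that start within the hotstart window.)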
+ suppress_unmatched_only_within_hotstart=True, + init_trk_keep_alive=0, + max_trk_keep_alive=8, + min_trk_keep_alive=-4, + # Threshold for suppressing overlapping objects based on recent occlusion + suppress_overlapping_based_on_recent_occlusion_threshold=0.0, + decrease_trk_keep_alive_for_empty_masklets=False, + o2o_matching_masklets_enable=False, # Enable hungarian matching to match existing masklets + suppress_det_close_to_boundary=False, + fill_hole_area=16, + # The maximum number of objects (masklets) to track across all GPUs (for no limit, set it to -1) + max_num_objects=-1, + recondition_every_nth_frame=-1, + # masket confirmation status (to suppress unconfirmed masklets) + masklet_confirmation_enable=False, + # a masklet is confirmed after being consecutively detected and matched for + # `masklet_confirmation_consecutive_det_thresh` + masklet_confirmation_consecutive_det_thresh=3, + # bbox heuristic parameters + reconstruction_bbox_iou_thresh=0.0, + reconstruction_bbox_det_score=0.0, + ): + super().__init__() + self.detector = detector + self.tracker = tracker + self.score_threshold_detection = score_threshold_detection + self.det_nms_thresh = det_nms_thresh + self.assoc_iou_thresh = assoc_iou_thresh + self.trk_assoc_iou_thresh = trk_assoc_iou_thresh + self.new_det_thresh = new_det_thresh + + # hotstart parameters + if hotstart_delay > 0: + assert hotstart_unmatch_thresh <= hotstart_delay + assert hotstart_dup_thresh <= hotstart_delay + self.hotstart_delay = hotstart_delay + self.hotstart_unmatch_thresh = hotstart_unmatch_thresh + self.hotstart_dup_thresh = hotstart_dup_thresh + self.suppress_unmatched_only_within_hotstart = ( + suppress_unmatched_only_within_hotstart + ) + self.init_trk_keep_alive = init_trk_keep_alive + self.max_trk_keep_alive = max_trk_keep_alive + self.min_trk_keep_alive = min_trk_keep_alive + self.suppress_overlapping_based_on_recent_occlusion_threshold = ( + suppress_overlapping_based_on_recent_occlusion_threshold + ) + self.suppress_det_close_to_boundary = suppress_det_close_to_boundary + self.decrease_trk_keep_alive_for_empty_masklets = ( + decrease_trk_keep_alive_for_empty_masklets + ) + self.o2o_matching_masklets_enable = o2o_matching_masklets_enable + self.fill_hole_area = fill_hole_area + self.eval() + self.rank = int(os.getenv("RANK", "0")) + self.world_size = int(os.getenv("WORLD_SIZE", "1")) + self._dist_pg_cpu = None # CPU process group (lazy-initialized on first use) + + # the maximum object number + if max_num_objects > 0: + num_obj_for_compile = math.ceil(max_num_objects / self.world_size) + else: + max_num_objects = 10000 # no limit + num_obj_for_compile = 16 + logger.info(f"setting {max_num_objects=} and {num_obj_for_compile=}") + self.max_num_objects = max_num_objects + self.num_obj_for_compile = num_obj_for_compile + self.recondition_every_nth_frame = recondition_every_nth_frame + self.masklet_confirmation_enable = masklet_confirmation_enable + self.masklet_confirmation_consecutive_det_thresh = ( + masklet_confirmation_consecutive_det_thresh + ) + self.reconstruction_bbox_iou_thresh = reconstruction_bbox_iou_thresh + self.reconstruction_bbox_det_score = reconstruction_bbox_det_score + + @property + def device(self): + self._device = getattr(self, "_device", None) or next(self.parameters()).device + return self._device + + def _init_dist_pg_cpu(self): + # a short 3-min timeout to quickly detect any synchronization failures + timeout_sec = int(os.getenv("SAM3_COLLECTIVE_OP_TIMEOUT_SEC", "180")) + timeout = 
datetime.timedelta(seconds=timeout_sec) + self._dist_pg_cpu = dist.new_group(backend="gloo", timeout=timeout) + + def broadcast_python_obj_cpu(self, python_obj_list, src): + if self._dist_pg_cpu is None: + self._init_dist_pg_cpu() + dist.broadcast_object_list(python_obj_list, src=src, group=self._dist_pg_cpu) + + def _det_track_one_frame( + self, + frame_idx: int, + num_frames: int, + reverse: bool, + input_batch: BatchedDatapoint, + geometric_prompt: Any, + tracker_states_local: List[Any], + tracker_metadata_prev: Dict[str, Any], + feature_cache: Dict, + orig_vid_height: int, + orig_vid_width: int, + is_image_only: bool = False, + allow_new_detections: bool = True, + ): + """ + This function handles one-step inference for the DenseTracking model in an SPMD manner. + At a high-level, all GPUs execute the same function calls as if it's done on a single GPU, + while under the hood, some function calls involve distributed computation based on sharded + SAM2 states. + + - `input_batch` contains image and other inputs on the entire video; it should be identical across GPUs + - `tracker_states_local` holds the local masklet information in this GPU shard + - `tracker_metadata_prev` manages the metadata for SAM2 objects, such as which masklet is hold on which GPUs + it contains both global and local masklet information + """ + + # Step 1: run backbone and detector in a distributed manner -- this is done via Sam3ImageOnVideoMultiGPU, + # a MultiGPU model (assigned to `self.detector`) that shards frames in a round-robin manner. + # It returns a "det_out" dict for `frame_idx` and fills SAM2 backbone features for `frame_idx` + # into `feature_cache`. Despite its distributed inference under the hood, the results would be + # the same as if it is running backbone and detector for every frame on a single GPU. + det_out = self.run_backbone_and_detection( + frame_idx=frame_idx, + num_frames=num_frames, + reverse=reverse, + input_batch=input_batch, + geometric_prompt=geometric_prompt, + feature_cache=feature_cache, + allow_new_detections=allow_new_detections, + ) + + # Step 2: each GPU propagates its local SAM2 states to get the SAM2 prediction masks. + # the returned `tracker_low_res_masks_global` contains the concatenated masklet predictions + # gathered from all GPUs (as if they are propagated on a single GPU). Note that this step only + # runs the SAM2 propagation step, but doesn't encode new memory for the predicted masks; + # we defer memory encoding to `run_tracker_update_execution_phase` after resolving all heuristics. + if tracker_metadata_prev == {}: + # initialize masklet metadata if it's uninitialized (empty dict) + tracker_metadata_prev.update(self._initialize_metadata()) + tracker_low_res_masks_global, tracker_obj_scores_global = ( + self.run_tracker_propagation( + frame_idx=frame_idx, + num_frames=num_frames, + reverse=reverse, + tracker_states_local=tracker_states_local, + tracker_metadata_prev=tracker_metadata_prev, + ) + ) + + # Step 3: based on detection outputs and the propagated SAM2 prediction masks, we make plans + # for SAM2 masklet updates (i.e. which objects to add and remove, how to load-balance them, etc). + # We also run SAM2 memory encoder globally in this step to resolve non-overlapping constraints. + # **This step should involve all the heuristics needed for any updates.** Most of the update + # planning will be done on the master rank (GPU 0) and the resulting plan `tracker_update_plan` is + # broadcasted to other GPUs (to be executed in a distributed manner). 
This step also generates the + # new masklet metadata `tracker_metadata_new` (based on its previous version `tracker_metadata_prev`). + tracker_update_plan, tracker_metadata_new = ( + self.run_tracker_update_planning_phase( + frame_idx=frame_idx, + num_frames=num_frames, + reverse=reverse, + det_out=det_out, + tracker_low_res_masks_global=tracker_low_res_masks_global, + tracker_obj_scores_global=tracker_obj_scores_global, + tracker_metadata_prev=tracker_metadata_prev, + tracker_states_local=tracker_states_local, + is_image_only=is_image_only, + ) + ) + + # Get reconditioning info from the update plan + reconditioned_obj_ids = tracker_update_plan.get("reconditioned_obj_ids", set()) + det_to_matched_trk_obj_ids = tracker_update_plan.get( + "det_to_matched_trk_obj_ids", {} + ) + + # Step 4: based on `tracker_update_plan`, each GPU executes the update w.r.t. its local SAM2 inference states + tracker_states_local_new = self.run_tracker_update_execution_phase( + frame_idx=frame_idx, + num_frames=num_frames, + reverse=reverse, + det_out=det_out, + tracker_states_local=tracker_states_local, + tracker_update_plan=tracker_update_plan, + orig_vid_height=orig_vid_height, + orig_vid_width=orig_vid_width, + feature_cache=feature_cache, + ) + + # Step 5: finally, build the outputs for this frame (it only needs to be done on GPU 0 since + # only GPU 0 will send outputs to the server). + if self.rank == 0: + obj_id_to_mask = self.build_outputs( + frame_idx=frame_idx, + num_frames=num_frames, + reverse=reverse, + det_out=det_out, + tracker_low_res_masks_global=tracker_low_res_masks_global, + tracker_obj_scores_global=tracker_obj_scores_global, + tracker_metadata_prev=tracker_metadata_prev, + tracker_update_plan=tracker_update_plan, + orig_vid_height=orig_vid_height, + orig_vid_width=orig_vid_width, + reconditioned_obj_ids=reconditioned_obj_ids, + det_to_matched_trk_obj_ids=det_to_matched_trk_obj_ids, + ) + obj_id_to_score = tracker_metadata_new["obj_id_to_score"] + else: + obj_id_to_mask, obj_id_to_score = {}, {} # dummy outputs on other GPUs + # a few statistics for the current frame as a part of the output + frame_stats = { + "num_obj_tracked": np.sum(tracker_metadata_new["num_obj_per_gpu"]), + "num_obj_dropped": tracker_update_plan["num_obj_dropped_due_to_limit"], + } + # add tracker scores to metadata, it should be fired for frames except the first frame + if tracker_obj_scores_global.shape[0] > 0: + # Convert tracker_obj_scores_global to sigmoid scores before updating + tracker_obj_scores_global = tracker_obj_scores_global.sigmoid().tolist() + tracker_obj_ids = tracker_metadata_prev["obj_ids_all_gpu"] + tracker_metadata_new["obj_id_to_tracker_score_frame_wise"][ + frame_idx + ].update(dict(zip(tracker_obj_ids, tracker_obj_scores_global))) + return ( + obj_id_to_mask, # a dict: obj_id --> output mask + obj_id_to_score, # a dict: obj_id --> output score (prob) + tracker_states_local_new, + tracker_metadata_new, + frame_stats, + tracker_obj_scores_global, # a dict: obj_id --> tracker frame-level scores + ) + + def _suppress_detections_close_to_boundary(self, boxes, margin=0.025): + """ + Suppress detections too close to image edges (for normalized boxes). 
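The body of this helper keeps a detection only when its box center lies at least `margin` away from every image edge. A self-contained sketch of that same center-margin test on made-up normalized boxes (the function name below is illustrative and not part of SAM3):

```python
# Illustrative sketch of the center-margin test described above -- not the SAM3 API.
import torch

def keep_away_from_border(boxes_xyxy_norm: torch.Tensor, margin: float = 0.025) -> torch.Tensor:
    """Return True for boxes whose center lies at least `margin` away from every image edge."""
    x_min, y_min, x_max, y_max = boxes_xyxy_norm.unbind(-1)
    x_c = (x_min + x_max) / 2
    y_c = (y_min + y_max) / 2
    return (x_c > margin) & (x_c < 1 - margin) & (y_c > margin) & (y_c < 1 - margin)

boxes = torch.tensor([
    [0.00, 0.40, 0.03, 0.60],   # hugs the left edge    -> dropped
    [0.40, 0.40, 0.60, 0.60],   # well inside the frame -> kept
])
print(keep_away_from_border(boxes))  # tensor([False,  True])
```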
+ + boxes: (N, 4) in xyxy format, normalized [0,1] + margin: fraction of image + """ + x_min, y_min, x_max, y_max = boxes.unbind(-1) + x_c = (x_min + x_max) / 2 + y_c = (y_min + y_max) / 2 + keep = ( + (x_c > margin) + & (x_c < 1.0 - margin) + & (y_c > margin) + & (y_c < 1.0 - margin) + ) + + return keep + + def run_backbone_and_detection( + self, + frame_idx: int, + num_frames: int, + input_batch: BatchedDatapoint, + geometric_prompt: Any, + feature_cache: Dict, + reverse: bool, + allow_new_detections: bool, + ): + # Step 1: if text feature is not cached in `feature_cache`, compute and cache it + text_batch_key = tuple(input_batch.find_text_batch) + if "text" not in feature_cache or text_batch_key not in feature_cache["text"]: + text_outputs = self.detector.backbone.forward_text( + input_batch.find_text_batch, device=self.device + ) + # note: we only cache the text feature of the most recent prompt + feature_cache["text"] = {text_batch_key: text_outputs} + else: + text_outputs = feature_cache["text"][text_batch_key] + + # Step 2: run backbone, detector, and post-processing with NMS + if "multigpu_buffer" not in feature_cache: + # "multigpu_buffer" is a buffer cache used by `self.detector` and it needs + # to be passed to `forward_video_grounding_multigpu` for every call + feature_cache["multigpu_buffer"] = {} + + # Extract max_frame_num_to_track from feature_cache if available + tracking_bounds = feature_cache.get("tracking_bounds", {}) + max_frame_num_to_track = tracking_bounds.get("max_frame_num_to_track") + start_frame_idx = tracking_bounds.get("propagate_in_video_start_frame_idx") + + sam3_image_out, _ = self.detector.forward_video_grounding_multigpu( + backbone_out={ + "img_batch_all_stages": input_batch.img_batch, + **text_outputs, + }, + find_inputs=input_batch.find_inputs, + geometric_prompt=geometric_prompt, + frame_idx=frame_idx, + num_frames=num_frames, + multigpu_buffer=feature_cache["multigpu_buffer"], + track_in_reverse=reverse, + # also get the SAM2 backbone features + return_tracker_backbone_feats=True, + # run NMS as a part of distributed computation + run_nms=self.det_nms_thresh > 0.0, + nms_prob_thresh=self.score_threshold_detection, + nms_iou_thresh=self.det_nms_thresh, + # pass max_frame_num_to_track to respect tracking limits + max_frame_num_to_track=max_frame_num_to_track, + propagate_in_video_start_frame_idx=start_frame_idx, + ) + # note: detections in `sam3_image_out` has already gone through NMS + pred_probs = sam3_image_out["pred_logits"].squeeze(-1).sigmoid() + if not allow_new_detections: + pred_probs = pred_probs - 1e8 # make sure no detections are kept + pred_boxes_xyxy = sam3_image_out["pred_boxes_xyxy"] + pred_masks = sam3_image_out["pred_masks"] + # get the positive detection outputs above threshold + pos_pred_idx = torch.where(pred_probs > self.score_threshold_detection) + det_out = { + "bbox": pred_boxes_xyxy[pos_pred_idx[0], pos_pred_idx[1]], + "mask": pred_masks[pos_pred_idx[0], pos_pred_idx[1]], + "scores": pred_probs[pos_pred_idx[0], pos_pred_idx[1]], + } + + # Step 3: build SAM2 backbone features and store them in `feature_cache` + backbone_cache = {} + sam_mask_decoder = self.tracker.sam_mask_decoder + tracker_backbone_fpn = [ + sam_mask_decoder.conv_s0(sam3_image_out["tracker_backbone_fpn_0"]), + sam_mask_decoder.conv_s1(sam3_image_out["tracker_backbone_fpn_1"]), + sam3_image_out["tracker_backbone_fpn_2"], # fpn_2 doesn't need conv + ] + tracker_backbone_out = { + "vision_features": tracker_backbone_fpn[-1], # top-level feature + 
"vision_pos_enc": sam3_image_out["tracker_backbone_pos_enc"], + "backbone_fpn": tracker_backbone_fpn, + } + backbone_cache["tracker_backbone_out"] = tracker_backbone_out + feature_cache[frame_idx] = ( + input_batch.img_batch[frame_idx], + backbone_cache, + ) + # remove from `feature_cache` old features to save GPU memory + feature_cache.pop(frame_idx - 1 if not reverse else frame_idx + 1, None) + return det_out + + def run_tracker_propagation( + self, + frame_idx: int, + num_frames: int, + reverse: bool, + tracker_states_local: List[Any], + tracker_metadata_prev: Dict[str, npt.NDArray], + ): + # Step 1: propagate the local SAM2 states to get the current frame's prediction + # `low_res_masks_local` of the existing masklets on this GPU + # - obj_ids_local: List[int] -- list of object IDs + # - low_res_masks_local: Tensor -- (num_local_obj, H_mask, W_mask) + obj_ids_local, low_res_masks_local, obj_scores_local = ( + self._propogate_tracker_one_frame_local_gpu( + tracker_states_local, frame_idx=frame_idx, reverse=reverse + ) + ) + + assert np.all( + obj_ids_local == tracker_metadata_prev["obj_ids_per_gpu"][self.rank] + ), "{} != {}".format( + obj_ids_local, tracker_metadata_prev["obj_ids_per_gpu"][self.rank] + ) + + # Step 2: all-gather `low_res_masks_local` into `low_res_masks_global` + # - low_res_masks_global: Tensor -- (num_global_obj, H_mask, W_mask) + _, H_mask, W_mask = low_res_masks_local.shape + if self.world_size > 1: + # `low_res_masks_local` and `obj_scores_local` need to be contiguous and float32 + # (they could be non-contiguous due to slicing and/or bfloat16 due to autocast) + low_res_masks_local = low_res_masks_local.float().contiguous() + obj_scores_local = obj_scores_local.float().contiguous() + num_obj_this_gpu = tracker_metadata_prev["num_obj_per_gpu"][self.rank] + assert low_res_masks_local.size(0) == num_obj_this_gpu + assert obj_scores_local.size(0) == num_obj_this_gpu + low_res_masks_peers = [ + low_res_masks_local.new_empty(num_obj, H_mask, W_mask) + for num_obj in tracker_metadata_prev["num_obj_per_gpu"] + ] + obj_scores_peers = [ + obj_scores_local.new_empty(num_obj) + for num_obj in tracker_metadata_prev["num_obj_per_gpu"] + ] + dist.all_gather(low_res_masks_peers, low_res_masks_local) + dist.all_gather(obj_scores_peers, obj_scores_local) + low_res_masks_global = torch.cat(low_res_masks_peers, dim=0) + obj_scores_global = torch.cat(obj_scores_peers, dim=0) + else: + low_res_masks_global = low_res_masks_local + obj_scores_global = obj_scores_local + return low_res_masks_global, obj_scores_global + + def _recondition_masklets( + self, + frame_idx, + det_out: Dict[str, Tensor], + trk_id_to_max_iou_high_conf_det: List[int], + tracker_states_local: List[Any], + tracker_metadata: Dict[str, npt.NDArray], + tracker_obj_scores_global: Tensor, + ): + # Recondition the masklets based on the new detections + for trk_obj_id, det_idx in trk_id_to_max_iou_high_conf_det.items(): + new_mask = det_out["mask"][det_idx : det_idx + 1] + input_mask_res = self.tracker.input_mask_size + new_mask_binary = ( + F.interpolate( + new_mask.unsqueeze(1), + size=(input_mask_res, input_mask_res), + mode="bilinear", + align_corners=False, + ).squeeze(1)[0] + > 0 + ) + HIGH_CONF_THRESH = 0.8 + reconditioned_states_idx = set() + obj_idx = np.where(tracker_metadata["obj_ids_all_gpu"] == trk_obj_id)[ + 0 + ].item() + obj_score = tracker_obj_scores_global[obj_idx] + for state_idx, inference_state in enumerate(tracker_states_local): + if ( + trk_obj_id in inference_state["obj_ids"] + # NOTE: Goal of this 
condition is to avoid reconditioning masks that are occluded/low qualiy. + # Unfortunately, these can get reconditioned anyway due to batching. We should consider removing these heuristics. + and obj_score > HIGH_CONF_THRESH + ): + logger.debug( + f"Adding new mask for track {trk_obj_id} at frame {frame_idx}. Objects {inference_state['obj_ids']} are all reconditioned." + ) + self.tracker.add_new_mask( + inference_state=inference_state, + frame_idx=frame_idx, + obj_id=trk_obj_id, + mask=new_mask_binary, + ) + reconditioned_states_idx.add(state_idx) + + for idx in reconditioned_states_idx: + self.tracker.propagate_in_video_preflight( + tracker_states_local[idx], run_mem_encoder=True + ) + return tracker_states_local + + def run_tracker_update_planning_phase( + self, + frame_idx: int, + num_frames: int, + reverse: bool, + det_out: Dict[str, Tensor], + tracker_low_res_masks_global: Tensor, + tracker_obj_scores_global: Tensor, + tracker_metadata_prev: Dict[str, npt.NDArray], + tracker_states_local: List[Any], + is_image_only: bool = False, + ): + # initialize new metadata from previous metadata (its values will be updated later) + tracker_metadata_new = { + "obj_ids_per_gpu": deepcopy(tracker_metadata_prev["obj_ids_per_gpu"]), + "obj_ids_all_gpu": None, # will be filled later + "num_obj_per_gpu": deepcopy(tracker_metadata_prev["num_obj_per_gpu"]), + "obj_id_to_score": deepcopy(tracker_metadata_prev["obj_id_to_score"]), + "obj_id_to_tracker_score_frame_wise": deepcopy( + tracker_metadata_prev["obj_id_to_tracker_score_frame_wise"] + ), + "obj_id_to_last_occluded": {}, # will be filled later + "max_obj_id": deepcopy(tracker_metadata_prev["max_obj_id"]), + } + + # Initialize reconditioned_obj_ids early to avoid UnboundLocalError + reconditioned_obj_ids = set() + + # Step 1: make the update plan and resolve heuristics on GPU 0 + det_mask_preds: Tensor = det_out["mask"] # low-res mask logits + det_scores_np: npt.NDArray = det_out["scores"].float().cpu().numpy() + det_bbox_xyxy: Tensor = det_out["bbox"] + if self.rank == 0: + # a) match detector and tracker masks and find new objects + ( + new_det_fa_inds, + unmatched_trk_obj_ids, + det_to_matched_trk_obj_ids, + trk_id_to_max_iou_high_conf_det, + empty_trk_obj_ids, + ) = self._associate_det_trk( + det_masks=det_mask_preds, + det_scores_np=det_scores_np, + trk_masks=tracker_low_res_masks_global, + trk_obj_ids=tracker_metadata_prev["obj_ids_all_gpu"], + ) + if self.suppress_det_close_to_boundary: + keep = self._suppress_detections_close_to_boundary( + det_bbox_xyxy[new_det_fa_inds] + ) + new_det_fa_inds = new_det_fa_inds[keep.cpu().numpy()] + + # check whether we've hit the maximum number of objects we can track (and if so, drop some detections) + prev_obj_num = np.sum(tracker_metadata_prev["num_obj_per_gpu"]) + new_det_num = len(new_det_fa_inds) + num_obj_dropped_due_to_limit = 0 + if not is_image_only and prev_obj_num + new_det_num > self.max_num_objects: + logger.warning( + f"hitting {self.max_num_objects=} with {new_det_num=} and {prev_obj_num=}" + ) + new_det_num_to_keep = self.max_num_objects - prev_obj_num + num_obj_dropped_due_to_limit = new_det_num - new_det_num_to_keep + new_det_fa_inds = self._drop_new_det_with_obj_limit( + new_det_fa_inds, det_scores_np, new_det_num_to_keep + ) + assert len(new_det_fa_inds) == new_det_num_to_keep + new_det_num = len(new_det_fa_inds) + + # assign object IDs to new detections and decide which GPU to place them + new_det_start_obj_id = tracker_metadata_prev["max_obj_id"] + 1 + new_det_obj_ids = 
new_det_start_obj_id + np.arange(new_det_num) + prev_workload_per_gpu = tracker_metadata_prev["num_obj_per_gpu"] + new_det_gpu_ids = self._assign_new_det_to_gpus( + new_det_num=new_det_num, + prev_workload_per_gpu=prev_workload_per_gpu, + ) + + # b) handle hotstart heuristics to remove objects + # here `rank0_metadata` contains metadata stored on (and only accessible to) GPU 0; + # we avoid broadcasting them to other GPUs to save communication cost, assuming + # that `rank0_metadata` is not needed by other GPUs + rank0_metadata_new = deepcopy(tracker_metadata_prev["rank0_metadata"]) + if not hasattr(self, "_warm_up_complete") or self._warm_up_complete: + obj_ids_newly_removed, rank0_metadata_new = self._process_hotstart( + frame_idx=frame_idx, + num_frames=num_frames, + reverse=reverse, + det_to_matched_trk_obj_ids=det_to_matched_trk_obj_ids, + new_det_obj_ids=new_det_obj_ids, + empty_trk_obj_ids=empty_trk_obj_ids, + unmatched_trk_obj_ids=unmatched_trk_obj_ids, + rank0_metadata=rank0_metadata_new, + tracker_metadata=tracker_metadata_prev, + ) + else: + # if warm-up is not complete, we don't remove any objects + obj_ids_newly_removed = set() + tracker_metadata_new["rank0_metadata"] = rank0_metadata_new + + # Step 2: broadcast the update plan to other GPUs + NUM_BROADCAST_ITEMS = 9 + if self.rank == 0 and self.world_size > 1: + # `num_obj_per_gpu_on_rank0` is used for metadata consistency check on other GPUs + # (it's a small array with length==self.world_size, so broadcasting it is cheap) + num_obj_per_gpu_on_rank0 = tracker_metadata_prev["num_obj_per_gpu"] + update_plan = [ + new_det_fa_inds, + new_det_obj_ids, + new_det_gpu_ids, + num_obj_per_gpu_on_rank0, + unmatched_trk_obj_ids, + det_to_matched_trk_obj_ids, + obj_ids_newly_removed, + num_obj_dropped_due_to_limit, + trk_id_to_max_iou_high_conf_det, + ] + assert ( + len(update_plan) == NUM_BROADCAST_ITEMS + ), f"Manually update NUM_BROADCAST_ITEMS to be: {len(update_plan)}" + self.broadcast_python_obj_cpu(update_plan, src=0) + elif self.rank > 0 and self.world_size > 1: + update_plan = [ + None + ] * NUM_BROADCAST_ITEMS # other ranks receive the plan from rank 0 + self.broadcast_python_obj_cpu(update_plan, src=0) + ( + new_det_fa_inds, + new_det_obj_ids, + new_det_gpu_ids, + num_obj_per_gpu_on_rank0, + unmatched_trk_obj_ids, + det_to_matched_trk_obj_ids, + obj_ids_newly_removed, + num_obj_dropped_due_to_limit, + trk_id_to_max_iou_high_conf_det, + ) = update_plan + # metadata consistency check: verify that the received `num_obj_per_gpu_on_rank0` is consistent with the local metadata + # it's critical that all GPUs agree on the previous number of objects (otherwise the inference might hang or fail silently) + if not np.all( + num_obj_per_gpu_on_rank0 == tracker_metadata_prev["num_obj_per_gpu"] + ): + raise RuntimeError( + f"{self.rank=} received {num_obj_per_gpu_on_rank0=}, which is inconsistent with local record " + f"{tracker_metadata_prev['num_obj_per_gpu']=}. There's likely a bug in update planning or execution." 
+ ) + + # `tracker_update_plan` should be identical on all GPUs after broadcasting + tracker_update_plan = { + "new_det_fa_inds": new_det_fa_inds, # npt.NDArray + "new_det_obj_ids": new_det_obj_ids, # npt.NDArray + "new_det_gpu_ids": new_det_gpu_ids, # npt.NDArray + "unmatched_trk_obj_ids": unmatched_trk_obj_ids, # npt.NDArray + "det_to_matched_trk_obj_ids": det_to_matched_trk_obj_ids, # dict + "obj_ids_newly_removed": obj_ids_newly_removed, # set + "num_obj_dropped_due_to_limit": num_obj_dropped_due_to_limit, # int + "trk_id_to_max_iou_high_conf_det": trk_id_to_max_iou_high_conf_det, # dict + "reconditioned_obj_ids": reconditioned_obj_ids, # set + } + + # Step 3 (optional): recondition masklets based on high-confidence detections before memory encoding + # NOTE: Running this in execution phase (after memory encoding) can lead to suboptimal results + should_recondition_iou = False + + # Evaluate tracklets for reconditioning based on bbox IoU mismatch with detections + if ( + self.reconstruction_bbox_iou_thresh > 0 + and len(trk_id_to_max_iou_high_conf_det) > 0 + ): + for trk_obj_id, det_idx in trk_id_to_max_iou_high_conf_det.items(): + det_box = det_out["bbox"][det_idx] + det_score = det_out["scores"][det_idx] + + try: + trk_idx = list(tracker_metadata_prev["obj_ids_all_gpu"]).index( + trk_obj_id + ) + except ValueError: + continue # Skip if tracklet not found + + tracker_mask = tracker_low_res_masks_global[trk_idx] + mask_binary = tracker_mask > 0 + mask_area = mask_binary.sum().item() + + if mask_area == 0: + continue # Skip tracklets with zero mask area + + # Get bounding box from SAM2 mask and convert to normalized coordinates + tracker_box_pixels = ( + mask_to_box(mask_binary.unsqueeze(0).unsqueeze(0)) + .squeeze(0) + .squeeze(0) + ) + mask_height, mask_width = tracker_mask.shape[-2:] + tracker_box_normalized = torch.tensor( + [ + tracker_box_pixels[0] / mask_width, + tracker_box_pixels[1] / mask_height, + tracker_box_pixels[2] / mask_width, + tracker_box_pixels[3] / mask_height, + ], + device=tracker_box_pixels.device, + ) + + # Compute IoU between detection and SAM2 tracklet bounding boxes + det_box_batch = det_box.unsqueeze(0) + tracker_box_batch = tracker_box_normalized.unsqueeze(0) + iou = fast_diag_box_iou(det_box_batch, tracker_box_batch)[0] + + if ( + iou < self.reconstruction_bbox_iou_thresh + and det_score >= self.reconstruction_bbox_det_score + ): + should_recondition_iou = True + reconditioned_obj_ids.add(trk_obj_id) + + should_recondition_periodic = ( + self.recondition_every_nth_frame > 0 + and frame_idx % self.recondition_every_nth_frame == 0 + and len(trk_id_to_max_iou_high_conf_det) > 0 + ) + + # Recondition if periodic or IoU condition met + if should_recondition_periodic or should_recondition_iou: + self._recondition_masklets( + frame_idx, + det_out, + trk_id_to_max_iou_high_conf_det, + tracker_states_local, + tracker_metadata_prev, + tracker_obj_scores_global, + ) + + # Step 4: Run SAM2 memory encoder on the current frame's prediction masks + # This is done on all GPUs + batch_size = tracker_low_res_masks_global.size(0) + if batch_size > 0: + if not hasattr(self, "_warm_up_complete") or self._warm_up_complete: + if self.suppress_overlapping_based_on_recent_occlusion_threshold > 0.0: + # NOTE: tracker_low_res_masks_global is updated in-place then returned + tracker_low_res_masks_global = ( + self._suppress_overlapping_based_on_recent_occlusion( + frame_idx, + tracker_low_res_masks_global, + tracker_metadata_prev, + tracker_metadata_new, + obj_ids_newly_removed, + 
reverse, + ) + ) + + self._tracker_update_memories( + tracker_states_local, + frame_idx, + tracker_metadata=tracker_metadata_prev, + low_res_masks=tracker_low_res_masks_global, + ) + + # Step 4: update the SAM2 metadata based on the update plan + # note: except for "rank0_metadata" (that is only available on GPU 0), + # the updated `tracker_metadata_new` should be identical on all GPUs + for rank in range(self.world_size): + new_det_obj_ids_this_gpu = new_det_obj_ids[new_det_gpu_ids == rank] + updated_obj_ids_this_gpu = tracker_metadata_new["obj_ids_per_gpu"][rank] + if len(new_det_obj_ids_this_gpu) > 0: + updated_obj_ids_this_gpu = np.concatenate( + [updated_obj_ids_this_gpu, new_det_obj_ids_this_gpu] + ) + if len(obj_ids_newly_removed) > 0: + is_removed = np.isin( + updated_obj_ids_this_gpu, list(obj_ids_newly_removed) + ) + updated_obj_ids_this_gpu = updated_obj_ids_this_gpu[~is_removed] + tracker_metadata_new["obj_ids_per_gpu"][rank] = updated_obj_ids_this_gpu + tracker_metadata_new["num_obj_per_gpu"][rank] = len( + updated_obj_ids_this_gpu + ) + tracker_metadata_new["obj_ids_all_gpu"] = np.concatenate( + tracker_metadata_new["obj_ids_per_gpu"] + ) + # update object scores and the maximum object ID assigned so far + if len(new_det_obj_ids) > 0: + tracker_metadata_new["obj_id_to_score"].update( + zip(new_det_obj_ids, det_scores_np[new_det_fa_inds]) + ) + # tracker scores are not available for new objects, use det score instead. + tracker_metadata_new["obj_id_to_tracker_score_frame_wise"][ + frame_idx + ].update(zip(new_det_obj_ids, det_scores_np[new_det_fa_inds])) + tracker_metadata_new["max_obj_id"] = max( + tracker_metadata_new["max_obj_id"], + np.max(new_det_obj_ids), + ) + # for removed objects, we set their scores to a very low value (-1e4) but still + # keep them in "obj_id_to_score" (it's easier to handle outputs this way) + for obj_id in obj_ids_newly_removed: + tracker_metadata_new["obj_id_to_score"][obj_id] = -1e4 + tracker_metadata_new["obj_id_to_tracker_score_frame_wise"][frame_idx][ + obj_id + ] = -1e4 + tracker_metadata_new["obj_id_to_last_occluded"].pop(obj_id, None) + # check that "rank0_metadata" is in tracker_metadata_new if and only if it's GPU 0 + assert ("rank0_metadata" in tracker_metadata_new) == (self.rank == 0) + if self.rank == 0 and self.masklet_confirmation_enable: + rank0_metadata = self.update_masklet_confirmation_status( + rank0_metadata=tracker_metadata_new["rank0_metadata"], + obj_ids_all_gpu_prev=tracker_metadata_prev["obj_ids_all_gpu"], + obj_ids_all_gpu_updated=tracker_metadata_new["obj_ids_all_gpu"], + det_to_matched_trk_obj_ids=det_to_matched_trk_obj_ids, + new_det_obj_ids=new_det_obj_ids, + ) + tracker_metadata_new["rank0_metadata"] = rank0_metadata + + return tracker_update_plan, tracker_metadata_new + + def _suppress_overlapping_based_on_recent_occlusion( + self, + frame_idx: int, + tracker_low_res_masks_global: Tensor, + tracker_metadata_prev: Dict[str, Any], + tracker_metadata_new: Dict[str, Any], + obj_ids_newly_removed: Set[int], + reverse: bool = False, + ): + """ + Suppress overlapping masks based on the most recent occlusion information. If an object is removed by hotstart, we always suppress it if it overlaps with any other object. + Args: + frame_idx (int): The current frame index. + tracker_low_res_masks_global (Tensor): The low-resolution masks for the current frame. + tracker_metadata_prev (Dict[str, Any]): The metadata from the previous frame. + tracker_metadata_new (Dict[str, Any]): The metadata for the current frame. 
+ obj_ids_newly_removed (Set[int]): The object IDs that have been removed. + Return: + Tensor: The updated low-resolution masks with some objects suppressed. + """ + obj_ids_global = tracker_metadata_prev["obj_ids_all_gpu"] + binary_tracker_low_res_masks_global = tracker_low_res_masks_global > 0 + batch_size = tracker_low_res_masks_global.size(0) + if batch_size > 0: + assert ( + len(obj_ids_global) == batch_size + ), f"Mismatch in number of objects: {len(obj_ids_global)} vs {batch_size}" + NEVER_OCCLUDED = -1 + ALWAYS_OCCLUDED = 100000 # This value should be larger than any possible frame index, indicates that the object was removed by hotstart logic + last_occluded_prev = torch.cat( + [ + tracker_metadata_prev["obj_id_to_last_occluded"].get( + obj_id, + torch.full( + (1,), + fill_value=( + NEVER_OCCLUDED + if obj_id not in obj_ids_newly_removed + else ALWAYS_OCCLUDED + ), + device=binary_tracker_low_res_masks_global.device, + dtype=torch.long, + ), + ) + for obj_id in obj_ids_global + ], + dim=0, + ) + to_suppress = self._get_objects_to_suppress_based_on_most_recently_occluded( + binary_tracker_low_res_masks_global, + last_occluded_prev, + obj_ids_global, + frame_idx, + reverse, + ) + + # Update metadata with occlusion information + is_obj_occluded = ~(binary_tracker_low_res_masks_global.any(dim=(-1, -2))) + is_obj_occluded_or_suppressed = is_obj_occluded | to_suppress + last_occluded_new = last_occluded_prev.clone() + last_occluded_new[is_obj_occluded_or_suppressed] = frame_idx + # Slice out the last occluded frame for each object + tracker_metadata_new["obj_id_to_last_occluded"] = { + obj_id: last_occluded_new[obj_idx : obj_idx + 1] + for obj_idx, obj_id in enumerate(obj_ids_global) + } + + # Zero out suppressed masks before memory encoding + NO_OBJ_LOGIT = -10 + tracker_low_res_masks_global[to_suppress] = NO_OBJ_LOGIT + + return tracker_low_res_masks_global + + def run_tracker_update_execution_phase( + self, + frame_idx: int, + num_frames: int, + reverse: bool, + det_out: Dict[str, Tensor], + tracker_states_local: List[Any], + tracker_update_plan: Dict[str, npt.NDArray], + orig_vid_height: int, + orig_vid_width: int, + feature_cache: Dict, + ): + # initialize tracking scores with detection scores + new_det_fa_inds: npt.NDArray = tracker_update_plan["new_det_fa_inds"] + new_det_obj_ids: npt.NDArray = tracker_update_plan["new_det_obj_ids"] + new_det_gpu_ids: npt.NDArray = tracker_update_plan["new_det_gpu_ids"] + is_on_this_gpu: npt.NDArray = new_det_gpu_ids == self.rank + new_det_obj_ids_local: npt.NDArray = new_det_obj_ids[is_on_this_gpu] + new_det_fa_inds_local: npt.NDArray = new_det_fa_inds[is_on_this_gpu] + obj_ids_newly_removed: Set[int] = tracker_update_plan["obj_ids_newly_removed"] + + # Step 1: add new objects from the detector to SAM2 inference states + if len(new_det_fa_inds_local) > 0: + new_det_fa_inds_local_t = torch.from_numpy(new_det_fa_inds_local) + new_det_masks: Tensor = det_out["mask"][new_det_fa_inds_local_t] + # initialize SAM2 with new object masks + tracker_states_local = self._tracker_add_new_objects( + frame_idx=frame_idx, + num_frames=num_frames, + new_obj_ids=new_det_obj_ids_local, + new_obj_masks=new_det_masks, + tracker_states_local=tracker_states_local, + orig_vid_height=orig_vid_height, + orig_vid_width=orig_vid_width, + feature_cache=feature_cache, + ) + + # Step 2: remove from SAM2 inference states those objects removed by heuristics + if len(obj_ids_newly_removed) > 0: + self._tracker_remove_objects(tracker_states_local, obj_ids_newly_removed) + + 
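The execution phase relies on each rank picking out only the detections that the broadcast plan assigned to it (the `new_det_gpu_ids == self.rank` selection above). A standalone NumPy sketch of that selection, with made-up IDs and ranks:

```python
# Illustrative sketch of per-rank selection from a broadcast update plan -- not SAM3 code.
import numpy as np

new_det_obj_ids = np.array([7, 8, 9, 10])  # IDs assigned on rank 0 and broadcast to all ranks
new_det_gpu_ids = np.array([0, 1, 0, 1])   # target GPU chosen for each new object
rank = 1

is_on_this_gpu = new_det_gpu_ids == rank
print(new_det_obj_ids[is_on_this_gpu])     # [ 8 10] -- only these objects are added locally
```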
return tracker_states_local + + def build_outputs( + self, + frame_idx: int, + num_frames: int, + reverse: bool, + det_out: Dict[str, Tensor], + tracker_low_res_masks_global: Tensor, + tracker_obj_scores_global: Tensor, + tracker_metadata_prev: Dict[str, npt.NDArray], + tracker_update_plan: Dict[str, npt.NDArray], + orig_vid_height: int, + orig_vid_width: int, + reconditioned_obj_ids: set = None, + det_to_matched_trk_obj_ids: dict = None, + ): + new_det_fa_inds: npt.NDArray = tracker_update_plan["new_det_fa_inds"] + new_det_obj_ids: npt.NDArray = tracker_update_plan["new_det_obj_ids"] + obj_id_to_mask = {} # obj_id --> output mask tensor + + # Part 1: masks from previous SAM2 propagation + existing_masklet_obj_ids = tracker_metadata_prev["obj_ids_all_gpu"] + existing_masklet_video_res_masks = F.interpolate( + tracker_low_res_masks_global.unsqueeze(1), + size=(orig_vid_height, orig_vid_width), + mode="bilinear", + align_corners=False, + ) # (num_obj, 1, H_video, W_video) + existing_masklet_binary = existing_masklet_video_res_masks > 0 + assert len(existing_masklet_obj_ids) == len(existing_masklet_binary) + for obj_id, mask in zip(existing_masklet_obj_ids, existing_masklet_binary): + obj_id_to_mask[obj_id] = mask # (1, H_video, W_video) + + # Part 2: masks from new detections + new_det_fa_inds_t = torch.from_numpy(new_det_fa_inds) + new_det_low_res_masks = det_out["mask"][new_det_fa_inds_t].unsqueeze(1) + new_det_low_res_masks = fill_holes_in_mask_scores( + new_det_low_res_masks, + max_area=self.fill_hole_area, + fill_holes=True, + remove_sprinkles=True, + ) + new_masklet_video_res_masks = F.interpolate( + new_det_low_res_masks, + size=(orig_vid_height, orig_vid_width), + mode="bilinear", + align_corners=False, + ) # (num_obj, 1, H_video, W_video) + + new_masklet_binary = new_masklet_video_res_masks > 0 + assert len(new_det_obj_ids) == len(new_masklet_video_res_masks) + for obj_id, mask in zip(new_det_obj_ids, new_masklet_binary): + obj_id_to_mask[obj_id] = mask # (1, H_video, W_video) + + # Part 3: Override masks for reconditioned objects using detection masks + if reconditioned_obj_ids is not None and len(reconditioned_obj_ids) > 0: + trk_id_to_max_iou_high_conf_det = tracker_update_plan.get( + "trk_id_to_max_iou_high_conf_det", {} + ) + + for obj_id in reconditioned_obj_ids: + det_idx = trk_id_to_max_iou_high_conf_det.get(obj_id) + + if det_idx is not None: + det_mask = det_out["mask"][det_idx] + det_mask = det_mask.unsqueeze(0).unsqueeze(0) + det_mask_resized = ( + F.interpolate( + det_mask.float(), + size=(orig_vid_height, orig_vid_width), + mode="bilinear", + align_corners=False, + ) + > 0 + ) + + det_mask_final = det_mask_resized.squeeze(0) + obj_id_to_mask[obj_id] = det_mask_final + + return obj_id_to_mask + + def _get_objects_to_suppress_based_on_most_recently_occluded( + self, + binary_low_res_masks: Tensor, + last_occluded: List[int], + obj_ids: List[int], + frame_idx: int = None, + reverse: bool = False, + ): + # Suppress overlapping masks for objects that were most recently occluded + assert ( + binary_low_res_masks.dtype == torch.bool + ), f"Expected boolean tensor, got {binary_low_res_masks.dtype}" + to_suppress = torch.zeros( + binary_low_res_masks.size(0), + device=binary_low_res_masks.device, + dtype=torch.bool, + ) + if len(obj_ids) <= 1: + return to_suppress + + iou = mask_iou(binary_low_res_masks, binary_low_res_masks) # [N,N] + + # Create masks for upper triangular matrix (i < j) and IoU threshold + mask_iou_thresh = ( + iou >= 
self.suppress_overlapping_based_on_recent_occlusion_threshold + ) + overlapping_pairs = torch.triu(mask_iou_thresh, diagonal=1) # [N,N] + + last_occ_expanded_i = last_occluded.unsqueeze(1) # (N, 1) + last_occ_expanded_j = last_occluded.unsqueeze(0) # (1, N) + # Suppress most recently occluded + cmp_op = torch.gt if not reverse else torch.lt + suppress_i_mask = ( + overlapping_pairs + & cmp_op( + last_occ_expanded_i, last_occ_expanded_j + ) # (last_occ_expanded_i > last_occ_expanded_j) + & ( + last_occ_expanded_j > -1 + ) # j can suppress i only if i was previously occluded + ) + suppress_j_mask = ( + overlapping_pairs + & cmp_op(last_occ_expanded_j, last_occ_expanded_i) + & ( + last_occ_expanded_i > -1 + ) # i can suppress j only if j was previously occluded + ) + # Apply suppression + to_suppress = suppress_i_mask.any(dim=1) | suppress_j_mask.any(dim=0) + + # Log for debugging + if ( + self.rank == 0 + and logger.isEnabledFor(logging.DEBUG) + and frame_idx is not None + ): + suppress_i_mask = suppress_i_mask.cpu().numpy() + suppress_j_mask = suppress_j_mask.cpu().numpy() + last_occluded = last_occluded.cpu().numpy() + + # Find all suppression pairs without using torch.where + batch_size = suppress_i_mask.shape[0] + + # Log i-suppression cases (where i gets suppressed in favor of j) + for i in range(batch_size): + for j in range(batch_size): + if suppress_i_mask[i, j]: + logger.debug( + f"{frame_idx=}: Suppressing obj {obj_ids[i]} last occluded {last_occluded[i]} in favor of {obj_ids[j]} last occluded {last_occluded[j]}" + ) + + # Log j-suppression cases (where j gets suppressed in favor of i) + for i in range(batch_size): + for j in range(batch_size): + if suppress_j_mask[i, j]: + logger.debug( + f"{frame_idx=}: Suppressing obj {obj_ids[j]} last occluded {last_occluded[j]} in favor of {obj_ids[i]} last occluded {last_occluded[i]}" + ) + + return to_suppress + + def _propogate_tracker_one_frame_local_gpu( + self, + inference_states: List[Any], + frame_idx: int, + reverse: bool, + # by default, we disable memory encoding until we gather all outputs + run_mem_encoder: bool = False, + ): + """ + inference_states: List of inference states, each state corresponds to a different set of objects. + """ + obj_ids_local = [] + low_res_masks_list = [] + obj_scores_list = [] + for inference_state in inference_states: + if len(inference_state["obj_ids"]) == 0: + continue # skip propagation on empty inference states + + # propagate one frame + num_frames_propagated = 0 + for out in self.tracker.propagate_in_video( + inference_state, + start_frame_idx=frame_idx, + # end_frame_idx = start_frame_idx + max_frame_num_to_track + # (i.e. 
propagating 1 frame since end_frame_idx is inclusive) + max_frame_num_to_track=0, + reverse=reverse, + tqdm_disable=True, + run_mem_encoder=run_mem_encoder, + ): + out_frame_idx, out_obj_ids, out_low_res_masks, _, out_obj_scores = out + num_frames_propagated += 1 + + # only 1 frames should be propagated + assert ( + num_frames_propagated == 1 and out_frame_idx == frame_idx + ), f"num_frames_propagated: {num_frames_propagated}, out_frame_idx: {out_frame_idx}, frame_idx: {frame_idx}" + assert isinstance(out_obj_ids, list) + obj_ids_local.extend(out_obj_ids) + low_res_masks_list.append(out_low_res_masks.squeeze(1)) + obj_scores_list.append(out_obj_scores.squeeze(1)) + + # concatenate the output masklets from all local inference states + H_mask = W_mask = self.tracker.low_res_mask_size + if len(low_res_masks_list) > 0: + low_res_masks_local = torch.cat(low_res_masks_list, dim=0) + obj_scores_local = torch.cat(obj_scores_list, dim=0) + assert low_res_masks_local.shape[1:] == (H_mask, W_mask) + + # Apply hole filling to the masks + low_res_masks_local = fill_holes_in_mask_scores( + low_res_masks_local.unsqueeze(1), + max_area=self.fill_hole_area, + fill_holes=True, + remove_sprinkles=True, + ) + low_res_masks_local = low_res_masks_local.squeeze(1) + else: + low_res_masks_local = torch.zeros(0, H_mask, W_mask, device=self.device) + obj_scores_local = torch.zeros(0, device=self.device) + + return obj_ids_local, low_res_masks_local, obj_scores_local + + def _associate_det_trk( + self, + det_masks: Tensor, + det_scores_np: npt.NDArray, + trk_masks: Tensor, + trk_obj_ids: npt.NDArray, + ): + """ + Match detections on the current frame with the existing masklets. + + Args: + - det_masks: (N, H, W) tensor of predicted masks + - det_scores_np: (N,) array of detection scores + - trk_masks: (M, H, W) tensor of track masks + - trk_obj_ids: (M,) array of object IDs corresponding to trk_masks + + Returns: + - new_det_fa_inds: array of new object indices. 
+ - unmatched_trk_obj_ids: array of existing masklet object IDs that are not matched + to any detections on this frame (for unmatched, we only count masklets with >0 area) + - det_to_matched_trk_obj_ids: dict[int, npt.NDArray]: mapping from detector's detection indices + to the list of matched tracklet object IDs + - empty_trk_obj_ids: array of existing masklet object IDs with zero area in SAM2 prediction + """ + iou_threshold = self.assoc_iou_thresh + iou_threshold_trk = self.trk_assoc_iou_thresh + new_det_thresh = self.new_det_thresh + + assert det_masks.is_floating_point(), "float tensor expected (do not binarize)" + assert trk_masks.is_floating_point(), "float tensor expected (do not binarize)" + assert ( + trk_masks.size(0) == len(trk_obj_ids) + ), f"trk_masks and trk_obj_ids should have the same length, {trk_masks.size(0)} vs {len(trk_obj_ids)}" + if trk_masks.size(0) == 0: + # all detections are new + new_det_fa_inds = np.arange(det_masks.size(0)) + unmatched_trk_obj_ids = np.array([], np.int64) + empty_trk_obj_ids = np.array([], np.int64) + det_to_matched_trk_obj_ids = {} + trk_id_to_max_iou_high_conf_det = {} + return ( + new_det_fa_inds, + unmatched_trk_obj_ids, + det_to_matched_trk_obj_ids, + trk_id_to_max_iou_high_conf_det, + empty_trk_obj_ids, + ) + elif det_masks.size(0) == 0: + # all previous tracklets are unmatched if they have a non-zero area + new_det_fa_inds = np.array([], np.int64) + trk_is_nonempty = (trk_masks > 0).any(dim=(1, 2)).cpu().numpy() + unmatched_trk_obj_ids = trk_obj_ids[trk_is_nonempty] + empty_trk_obj_ids = trk_obj_ids[~trk_is_nonempty] + det_to_matched_trk_obj_ids = {} + trk_id_to_max_iou_high_conf_det = {} + return ( + new_det_fa_inds, + unmatched_trk_obj_ids, + det_to_matched_trk_obj_ids, + trk_id_to_max_iou_high_conf_det, + empty_trk_obj_ids, + ) + + if det_masks.shape[-2:] != trk_masks.shape[-2:]: + # resize to the smaller size to save GPU memory + if np.prod(det_masks.shape[-2:]) < np.prod(trk_masks.shape[-2:]): + trk_masks = F.interpolate( + trk_masks.unsqueeze(1), + size=det_masks.shape[-2:], + mode="bilinear", + align_corners=False, + ).squeeze(1) + else: + # resize detections to track size + det_masks = F.interpolate( + det_masks.unsqueeze(1), + size=trk_masks.shape[-2:], + mode="bilinear", + align_corners=False, + ).squeeze(1) + + det_masks_binary = det_masks > 0 + trk_masks_binary = trk_masks > 0 + ious = mask_iou(det_masks_binary, trk_masks_binary) # (N, M) + + ious_np = ious.cpu().numpy() + if self.o2o_matching_masklets_enable: + from scipy.optimize import linear_sum_assignment + + # Hungarian matching for tracks (one-to-one: each track matches at most one detection) + cost_matrix = 1 - ious_np # Hungarian solves for minimum cost + row_ind, col_ind = linear_sum_assignment(cost_matrix) + trk_is_matched = np.zeros(trk_masks.size(0), dtype=bool) + for d, t in zip(row_ind, col_ind): + if ious_np[d, t] >= iou_threshold_trk: + trk_is_matched[t] = True + else: + trk_is_matched = (ious_np >= iou_threshold_trk).any(axis=0) + # Non-empty tracks not matched by Hungarian assignment above threshold are unmatched + trk_is_nonempty = trk_masks_binary.any(dim=(1, 2)).cpu().numpy() + trk_is_unmatched = np.logical_and(trk_is_nonempty, ~trk_is_matched) + unmatched_trk_obj_ids = trk_obj_ids[trk_is_unmatched] + # also record masklets that have zero area in SAM 2 prediction + empty_trk_obj_ids = trk_obj_ids[~trk_is_nonempty] + + # For detections: allow many tracks to match to the same detection (many-to-one) + # So, a detection is 'new' if it does not match any 
track above threshold + is_new_det = np.logical_and( + det_scores_np >= new_det_thresh, + np.logical_not(np.any(ious_np >= iou_threshold, axis=1)), + ) + new_det_fa_inds = np.nonzero(is_new_det)[0] + + # for each detection, which tracks it matched to (above threshold) + det_to_matched_trk_obj_ids = {} + trk_id_to_max_iou_high_conf_det = {} # trk id --> exactly one detection idx + HIGH_CONF_THRESH = 0.8 + HIGH_IOU_THRESH = 0.8 + det_to_max_iou_trk_idx = np.argmax(ious_np, axis=1) + det_is_high_conf = (det_scores_np >= HIGH_CONF_THRESH) & ~is_new_det + det_is_high_iou = np.max(ious_np, axis=1) >= HIGH_IOU_THRESH + det_is_high_conf_and_iou = set( + np.nonzero(det_is_high_conf & det_is_high_iou)[0] + ) + for d in range(det_masks.size(0)): + det_to_matched_trk_obj_ids[d] = trk_obj_ids[ious_np[d, :] >= iou_threshold] + if d in det_is_high_conf_and_iou: + trk_obj_id = trk_obj_ids[det_to_max_iou_trk_idx[d]].item() + trk_id_to_max_iou_high_conf_det[trk_obj_id] = d + + return ( + new_det_fa_inds, + unmatched_trk_obj_ids, + det_to_matched_trk_obj_ids, + trk_id_to_max_iou_high_conf_det, + empty_trk_obj_ids, + ) + + def _assign_new_det_to_gpus(self, new_det_num, prev_workload_per_gpu): + """Distribute the new objects to the GPUs with the least workload.""" + workload_per_gpu: npt.NDArray = prev_workload_per_gpu.copy() + new_det_gpu_ids = np.zeros(new_det_num, np.int64) + + # assign the objects one by one + for i in range(len(new_det_gpu_ids)): + # find the GPU with the least workload + min_gpu = np.argmin(workload_per_gpu) + new_det_gpu_ids[i] = min_gpu + workload_per_gpu[min_gpu] += 1 + return new_det_gpu_ids + + def _process_hotstart( + self, + frame_idx: int, + num_frames: int, + reverse: bool, + det_to_matched_trk_obj_ids: Dict[int, npt.NDArray], + new_det_obj_ids: npt.NDArray, + empty_trk_obj_ids: npt.NDArray, + unmatched_trk_obj_ids: npt.NDArray, + rank0_metadata: Dict[str, Any], + tracker_metadata: Dict[str, Any], + ): + """Handle hotstart heuristics to remove unmatched or duplicated objects.""" + # obj_id --> first frame index where the object was detected + obj_first_frame_idx = rank0_metadata["obj_first_frame_idx"] + # obj_id --> [mismatched frame indices] + unmatched_frame_inds = rank0_metadata["unmatched_frame_inds"] + trk_keep_alive = rank0_metadata["trk_keep_alive"] + # (first_appear_obj_id, obj_id) --> [overlap frame indices] + overlap_pair_to_frame_inds = rank0_metadata["overlap_pair_to_frame_inds"] + # removed_obj_ids: object IDs that are suppressed via hot-start + removed_obj_ids = rank0_metadata["removed_obj_ids"] + suppressed_obj_ids = rank0_metadata["suppressed_obj_ids"][frame_idx] + + obj_ids_newly_removed = set() # object IDs to be newly removed on this frame + hotstart_diff = ( + frame_idx - self.hotstart_delay + if not reverse + else frame_idx + self.hotstart_delay + ) + + # Step 1: log the frame index where each object ID first appears + for obj_id in new_det_obj_ids: + if obj_id not in obj_first_frame_idx: + obj_first_frame_idx[obj_id] = frame_idx + assert obj_id not in trk_keep_alive + trk_keep_alive[obj_id] = self.init_trk_keep_alive + + matched_trks = set() + # We use the det-->tracks list to check for matched objects. 
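The keep-alive bookkeeping used in this step can be summarized as a standalone counter: new objects start at `init_trk_keep_alive`, a match adds one up to `max_trk_keep_alive`, a miss subtracts one down to `min_trk_keep_alive`, and a non-positive value marks a suppression candidate. The sketch below uses the class defaults (0, 8, -4); the frame sequence is made up, and the real code additionally applies the hotstart checks described in the surrounding comments.

```python
# Minimal sketch of the keep-alive counter behaviour -- not the SAM3 implementation.
from collections import defaultdict

INIT, MAX, MIN = 0, 8, -4  # the defaults of init/max/min_trk_keep_alive in Sam3VideoBase

keep_alive = defaultdict(lambda: INIT)

def on_frame(obj_id, matched):
    """Update the counter; return True if the object is now a suppression candidate."""
    if matched:
        keep_alive[obj_id] = min(MAX, keep_alive[obj_id] + 1)
    else:
        keep_alive[obj_id] = max(MIN, keep_alive[obj_id] - 1)
    return keep_alive[obj_id] <= 0

print([on_frame(1, m) for m in (True, True, True, False)])  # [False, False, False, False]
print(on_frame(2, False))                                   # True (never matched)
```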
Otherwise, we need to compute areas to decide whether they're occluded + for matched_trks_per_det in det_to_matched_trk_obj_ids.values(): + matched_trks.update(matched_trks_per_det) + for obj_id in matched_trks: + # NOTE: To minimize number of configurable params, we use the hotstart_unmatch_thresh to set the max value of trk_keep_alive + trk_keep_alive[obj_id] = min( + self.max_trk_keep_alive, trk_keep_alive[obj_id] + 1 + ) + for obj_id in unmatched_trk_obj_ids: + unmatched_frame_inds[obj_id].append(frame_idx) + # NOTE: To minimize number of configurable params, we use the hotstart_unmatch_thresh to set the min value of trk_keep_alive + # The max keep alive is 2x the min, means the model prefers to keep the prediction rather than suppress it if it was matched long enough. + trk_keep_alive[obj_id] = max( + self.min_trk_keep_alive, trk_keep_alive[obj_id] - 1 + ) + if self.decrease_trk_keep_alive_for_empty_masklets: + for obj_id in empty_trk_obj_ids: + # NOTE: To minimize number of configurable params, we use the hotstart_unmatch_thresh to set the min value of trk_keep_alive + trk_keep_alive[obj_id] = max( + self.min_trk_keep_alive, trk_keep_alive[obj_id] - 1 + ) + + # Step 2: removed tracks that has not matched with detections for `hotstart_unmatch_thresh` frames with hotstart period + # a) add unmatched frame indices for each existing object ID + # note that `unmatched_trk_obj_ids` contains those frames where the SAM2 output mask + # doesn't match any detection; it excludes those frames where SAM2 gives an empty mask + # b) remove a masklet if it first appears after `hotstart_diff` and is unmatched for more + # than `self.hotstart_unmatch_thresh` frames + for obj_id, frame_indices in unmatched_frame_inds.items(): + if obj_id in removed_obj_ids or obj_id in obj_ids_newly_removed: + continue # skip if the object is already removed + if len(frame_indices) >= self.hotstart_unmatch_thresh: + is_within_hotstart = ( + obj_first_frame_idx[obj_id] > hotstart_diff and not reverse + ) or (obj_first_frame_idx[obj_id] < hotstart_diff and reverse) + if is_within_hotstart: + obj_ids_newly_removed.add(obj_id) + logger.debug( + f"Removing object {obj_id} at frame {frame_idx} " + f"since it is unmatched for frames: {frame_indices}" + ) + if ( + trk_keep_alive[obj_id] <= 0 # Object has not been matched for too long + and not self.suppress_unmatched_only_within_hotstart + and obj_id not in removed_obj_ids + and obj_id not in obj_ids_newly_removed + ): + logger.debug( + f"Suppressing object {obj_id} at frame {frame_idx}, due to being unmatched" + ) + suppressed_obj_ids.add(obj_id) + + # Step 3: removed tracks that overlaps with another track for `hotstart_dup_thresh` frames + # a) find overlaps tracks -- we consider overlap if they match to the same detection + for _, matched_trk_obj_ids in det_to_matched_trk_obj_ids.items(): + if len(matched_trk_obj_ids) < 2: + continue # only count detections that are matched to multiple (>=2) masklets + # if there are multiple matched track ids, we need to find the one that appeared first; + # these later appearing ids may be removed since they may be considered as duplicates + first_appear_obj_id = ( + min(matched_trk_obj_ids, key=lambda x: obj_first_frame_idx[x]) + if not reverse + else max(matched_trk_obj_ids, key=lambda x: obj_first_frame_idx[x]) + ) + for obj_id in matched_trk_obj_ids: + if obj_id != first_appear_obj_id: + key = (first_appear_obj_id, obj_id) + overlap_pair_to_frame_inds[key].append(frame_idx) + + # b) remove a masklet if it first appears after 
`hotstart_diff` and it overlaps with another + # masklet (that appears earlier) for more than `self.hotstart_dup_thresh` frames + for (first_obj_id, obj_id), frame_indices in overlap_pair_to_frame_inds.items(): + if obj_id in removed_obj_ids or obj_id in obj_ids_newly_removed: + continue # skip if the object is already removed + if (obj_first_frame_idx[obj_id] > hotstart_diff and not reverse) or ( + obj_first_frame_idx[obj_id] < hotstart_diff and reverse + ): + if len(frame_indices) >= self.hotstart_dup_thresh: + obj_ids_newly_removed.add(obj_id) + logger.debug( + f"Removing object {obj_id} at frame {frame_idx} " + f"since it overlaps with another track {first_obj_id} at frames: {frame_indices}" + ) + + removed_obj_ids.update(obj_ids_newly_removed) + return obj_ids_newly_removed, rank0_metadata + + def _tracker_update_memories( + self, + tracker_inference_states: List[Any], + frame_idx: int, + tracker_metadata: Dict[str, Any], + low_res_masks: Tensor, + ): + """ + Run Sam2 memory encoder, enforcing non-overlapping constraints globally. + """ + if len(tracker_inference_states) == 0: + return + # Avoid an extra interpolation step by directly interpolating to `interpol_size` + high_res_H, high_res_W = ( + self.tracker.maskmem_backbone.mask_downsampler.interpol_size + ) + # NOTE: inspect this part if we observe OOMs in the demo + high_res_masks = F.interpolate( + low_res_masks.unsqueeze(1), + size=(high_res_H, high_res_W), + mode="bilinear", + align_corners=False, + ) + # We first apply non-overlapping constraints before memory encoding. This may include some suppression heuristics. + if not hasattr(self, "_warm_up_complete") or self._warm_up_complete: + high_res_masks = self.tracker._suppress_object_pw_area_shrinkage( + high_res_masks + ) + # Instead of gathering the predicted object scores, we use mask areas as a proxy. + object_score_logits = torch.where( + (high_res_masks > 0).any(dim=(-1, -2)), 10.0, -10.0 + ) + + # Run the memory encoder on local slices for each GPU + start_idx_gpu = sum(tracker_metadata["num_obj_per_gpu"][: self.rank]) + start_idx_state = start_idx_gpu + for tracker_state in tracker_inference_states: + num_obj_per_state = len(tracker_state["obj_ids"]) + if num_obj_per_state == 0: + continue + # Get the local high-res masks and object score logits for this inference state + end_idx_state = start_idx_state + num_obj_per_state + local_high_res_masks = high_res_masks[start_idx_state:end_idx_state] + local_object_score_logits = object_score_logits[ + start_idx_state:end_idx_state + ] + local_batch_size = local_high_res_masks.size(0) + # Run Sam2 memory encoder. 
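The indexing in this method takes a mask tensor gathered across all GPUs and slices out first this rank's span (offset by the object counts of the lower ranks) and then one chunk per local inference state. A toy, self-contained version of that slicing with made-up object counts:

```python
# Toy illustration of slicing a globally gathered tensor back into local chunks -- not SAM3 code.
import torch

num_obj_per_gpu = [3, 2, 4]        # objects held by GPU 0, 1, 2 (made-up numbers)
rank = 1
objs_per_local_state = [1, 1]      # this rank keeps its 2 objects in 2 inference states

global_masks = torch.randn(sum(num_obj_per_gpu), 1, 8, 8)  # stand-in for the gathered masks

start = sum(num_obj_per_gpu[:rank])     # global offset of this rank's first object -> 3
for n in objs_per_local_state:
    local_chunk = global_masks[start:start + n]
    print(local_chunk.shape)            # torch.Size([1, 1, 8, 8]) for each state
    start += n
```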
Note that we do not re-enforce the non-overlapping constraint as it is turned off by default + + encoded_mem = self.tracker._run_memory_encoder( + tracker_state, + frame_idx, + local_batch_size, + local_high_res_masks, + local_object_score_logits, + is_mask_from_pts=False, + ) + local_maskmem_features, local_maskmem_pos_enc = encoded_mem + # Store encoded memories in the local inference state + output_dict = tracker_state["output_dict"] + for storage_key in ["cond_frame_outputs", "non_cond_frame_outputs"]: + if frame_idx not in output_dict[storage_key]: + continue + output_dict[storage_key][frame_idx]["maskmem_features"] = ( + local_maskmem_features + ) + output_dict[storage_key][frame_idx]["maskmem_pos_enc"] = [ + pos for pos in local_maskmem_pos_enc + ] + # for batched inference state, we also need to add per-object + # memory slides to support instance interactivity + self.tracker._add_output_per_object( + inference_state=tracker_state, + frame_idx=frame_idx, + current_out=output_dict[storage_key][frame_idx], + storage_key=storage_key, + ) + start_idx_state += num_obj_per_state + + def _tracker_add_new_objects( + self, + frame_idx: int, + num_frames: int, + new_obj_ids: List[int], + new_obj_masks: Tensor, + tracker_states_local: List[Any], + orig_vid_height: int, + orig_vid_width: int, + feature_cache: Dict, + ): + """Add a new object to SAM2 inference states.""" + prev_tracker_state = ( + tracker_states_local[0] if len(tracker_states_local) > 0 else None + ) + + # prepare inference_state + # batch objects that first appear on the same frame together + # Clear inference state. Keep the cached image features if available. + new_tracker_state = self.tracker.init_state( + cached_features=feature_cache, + video_height=orig_vid_height, + video_width=orig_vid_width, + num_frames=num_frames, + ) + new_tracker_state["backbone_out"] = ( + prev_tracker_state.get("backbone_out", None) + if prev_tracker_state is not None + else None + ) + + assert len(new_obj_ids) == new_obj_masks.size(0) + assert new_obj_masks.is_floating_point() + input_mask_res = self.tracker.input_mask_size + new_obj_masks = F.interpolate( + new_obj_masks.unsqueeze(1), + size=(input_mask_res, input_mask_res), + mode="bilinear", + align_corners=False, + ).squeeze(1) + new_obj_masks = new_obj_masks > 0 + + # add object one by one + for new_obj_id, new_mask in zip(new_obj_ids, new_obj_masks): + self.tracker.add_new_mask( + inference_state=new_tracker_state, + frame_idx=frame_idx, + obj_id=new_obj_id, + mask=new_mask, + add_mask_to_memory=True, + ) + # NOTE: we skip enforcing the non-overlapping constraint **globally** when adding new objects. + self.tracker.propagate_in_video_preflight( + new_tracker_state, run_mem_encoder=True + ) + tracker_states_local.append(new_tracker_state) + return tracker_states_local + + def _tracker_remove_object(self, tracker_states_local: List[Any], obj_id: int): + """ + Remove an object from SAM2 inference states. This would remove the object from + all frames in the video. 
+ """ + tracker_states_local_before_removal = tracker_states_local.copy() + tracker_states_local.clear() + for tracker_inference_state in tracker_states_local_before_removal: + # we try to remove `obj_id` on every inference state with `strict=False` + # it will not do anything if an inference state doesn't contain `obj_id` + new_obj_ids, _ = self.tracker.remove_object( + tracker_inference_state, obj_id, strict=False, need_output=False + ) + # only keep an inference state if it's non-empty after object removal + if len(new_obj_ids) > 0: + tracker_states_local.append(tracker_inference_state) + + def _tracker_remove_objects( + self, tracker_states_local: List[Any], obj_ids: list[int] + ): + """ + Remove an object from SAM2 inference states. This would remove the object from + all frames in the video. + """ + for obj_id in obj_ids: + self._tracker_remove_object(tracker_states_local, obj_id) + + def _initialize_metadata(self): + """Initialize metadata for the masklets.""" + tracker_metadata = { + "obj_ids_per_gpu": [np.array([], np.int64) for _ in range(self.world_size)], + "obj_ids_all_gpu": np.array([], np.int64), + "num_obj_per_gpu": np.zeros(self.world_size, np.int64), + "max_obj_id": -1, + "obj_id_to_score": {}, + "obj_id_to_tracker_score_frame_wise": defaultdict(dict), + "obj_id_to_last_occluded": {}, + } + if self.rank == 0: + # "rank0_metadata" contains metadata that is only stored on (and accessible to) GPU 0 + # - obj_first_frame_idx: obj_id --> first frame index where the object was detected + # - unmatched_frame_inds: obj_id --> [mismatched frame indices] + # - overlap_pair_to_frame_inds: (first_appear_obj_id, obj_id) --> [overlap frame indices] + # - removed_obj_ids: object IDs that are suppressed via hot-start + rank0_metadata = { + "obj_first_frame_idx": {}, + "unmatched_frame_inds": defaultdict(list), + "trk_keep_alive": defaultdict( + int + ), # This is used only for object suppression not for removal + "overlap_pair_to_frame_inds": defaultdict(list), + "removed_obj_ids": set(), + "suppressed_obj_ids": defaultdict( + set + ), # frame_idx --> set of objects with suppressed outputs, but still continue to be tracked + } + if self.masklet_confirmation_enable: + # all the following are npt.NDArray with the same shape as `obj_ids_all_gpu` + rank0_metadata["masklet_confirmation"] = { + # "status" is the confirmation status of each masklet (in `MaskletConfirmationStatus`) + "status": np.array([], np.int64), + # "consecutive_det_num" is the number of consecutive frames where the masklet is + # detected by the detector (with a matched detection) + "consecutive_det_num": np.array([], np.int64), + } + tracker_metadata["rank0_metadata"] = rank0_metadata + + return tracker_metadata + + def update_masklet_confirmation_status( + self, + rank0_metadata: Dict[str, Any], + obj_ids_all_gpu_prev: npt.NDArray, + obj_ids_all_gpu_updated: npt.NDArray, + det_to_matched_trk_obj_ids: Dict[int, npt.NDArray], + new_det_obj_ids: npt.NDArray, + ): + confirmation_data = rank0_metadata["masklet_confirmation"] + + # a) first, expand "confirmation_data" to include new masklets added in this frame + status_prev = confirmation_data["status"] + consecutive_det_num_prev = confirmation_data["consecutive_det_num"] + assert ( + status_prev.shape == obj_ids_all_gpu_prev.shape + ), f"Got {status_prev.shape} vs {obj_ids_all_gpu_prev.shape}" + + obj_id_to_updated_idx = { + obj_id: idx for idx, obj_id in enumerate(obj_ids_all_gpu_updated) + } + prev_elem_is_in_updated = np.isin(obj_ids_all_gpu_prev, obj_ids_all_gpu_updated) 
+ prev_elem_obj_ids_in_updated = obj_ids_all_gpu_prev[prev_elem_is_in_updated] + prev_elem_inds_in_updated = np.array( + [obj_id_to_updated_idx[obj_id] for obj_id in prev_elem_obj_ids_in_updated], + dtype=np.int64, + ) + # newly added masklets are initialized to "UNCONFIRMED" status + unconfirmed_val = MaskletConfirmationStatus.UNCONFIRMED.value + status = np.full_like(obj_ids_all_gpu_updated, fill_value=unconfirmed_val) + status[prev_elem_inds_in_updated] = status_prev[prev_elem_is_in_updated] + consecutive_det_num = np.zeros_like(obj_ids_all_gpu_updated) + consecutive_det_num[prev_elem_inds_in_updated] = consecutive_det_num_prev[ + prev_elem_is_in_updated + ] + + # b) update the confirmation status of all masklets based on the current frame + # b.1) update "consecutive_det_num" + # "is_matched": whether a masklet is matched to a detection on this frame + is_matched = np.isin(obj_ids_all_gpu_updated, new_det_obj_ids) + for matched_trk_obj_ids in det_to_matched_trk_obj_ids.values(): + is_matched |= np.isin(obj_ids_all_gpu_updated, matched_trk_obj_ids) + consecutive_det_num = np.where(is_matched, consecutive_det_num + 1, 0) + + # b.2) update "status" + change_to_confirmed = ( + consecutive_det_num >= self.masklet_confirmation_consecutive_det_thresh + ) + status[change_to_confirmed] = MaskletConfirmationStatus.CONFIRMED.value + + confirmation_data["status"] = status + confirmation_data["consecutive_det_num"] = consecutive_det_num + return rank0_metadata + + def forward(self, input: BatchedDatapoint, is_inference: bool = False): + raise NotImplementedError("Evaluation outside demo is not implemented yet") + + def _load_checkpoint(self, ckpt_path: str, strict: bool = True): + sd = torch.load(ckpt_path, map_location="cpu", weights_only=True)["model"] + missing_keys, unexpected_keys = self.load_state_dict(sd, strict=strict) + if len(missing_keys) > 0 or len(unexpected_keys) > 0: + logger.warning(f"Loaded ckpt with {missing_keys=}, {unexpected_keys=}") + else: + logger.info("Loaded ckpt successfully without missing or unexpected keys") + + def prep_for_evaluator(self, video_frames, tracking_res, scores_labels): + """This method is only used for benchmark eval (not used in the demo).""" + num_frames = len(video_frames) + w, h = video_frames[0].size + zero_mask = torch.zeros((1, h, w), dtype=torch.bool) + object_ids = list(scores_labels.keys()) + preds = {"scores": [], "labels": [], "boxes": [], "masks_rle": []} + for oid in object_ids: + o_masks = [] + o_score = scores_labels[oid][0].item() + o_label = scores_labels[oid][1] + for frame_idx in range(num_frames): + if frame_idx not in tracking_res: + o_masks.append(zero_mask) + else: + o_masks.append(tracking_res[frame_idx].get(oid, zero_mask)) + + o_masks = torch.cat(o_masks, dim=0) # (n_frames, H, W) + preds["scores"].append(o_score) + preds["labels"].append(o_label) + preds["boxes"].append(mask_to_box(o_masks.unsqueeze(1)).squeeze()) + preds["masks_rle"].append(rle_encode(o_masks, return_areas=True)) + + preds["boxes"] = ( + torch.stack(preds["boxes"], dim=0) + if len(preds["boxes"]) > 0 + else torch.empty( + (0, num_frames, 4), dtype=torch.float32, device=self.device + ) + ) + preds["scores"] = ( + torch.tensor(preds["scores"], device=self.device) + if len(preds["scores"]) > 0 + else torch.empty((0,), device=self.device) + ) + preds["per_frame_scores"] = preds["scores"] + preds["labels"] = ( + torch.tensor(preds["labels"], device=self.device) + if len(preds["labels"]) > 0 + else torch.empty((0,), device=self.device) + ) + return preds + + def 
_encode_prompt(self, **kwargs): + return self.detector._encode_prompt(**kwargs) + + def _drop_new_det_with_obj_limit(self, new_det_fa_inds, det_scores_np, num_to_keep): + """ + Drop a few new detections based on the maximum number of objects. We drop new objects based + on their detection scores, keeping the high-scoring ones and dropping the low-scoring ones. + """ + assert 0 <= num_to_keep <= len(new_det_fa_inds) + if num_to_keep == 0: + return np.array([], np.int64) # keep none + if num_to_keep == len(new_det_fa_inds): + return new_det_fa_inds # keep all + + # keep the top-scoring detections + score_order = np.argsort(det_scores_np[new_det_fa_inds])[::-1] + new_det_fa_inds = new_det_fa_inds[score_order[:num_to_keep]] + return new_det_fa_inds diff --git a/detect_tools/sam3/sam3/model/sam3_video_inference.py b/detect_tools/sam3/sam3/model/sam3_video_inference.py new file mode 100644 index 0000000000000000000000000000000000000000..7fb87d016a28efe63de2837692a4ba8fa01c004d --- /dev/null +++ b/detect_tools/sam3/sam3/model/sam3_video_inference.py @@ -0,0 +1,1709 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import logging +from collections import defaultdict + +import numpy as np +import torch +import torch.distributed as dist +import torch.nn.functional as F + +from sam3 import perflib +from sam3.logger import get_logger +from sam3.model.act_ckpt_utils import clone_output_wrapper +from sam3.model.box_ops import box_xywh_to_cxcywh, box_xyxy_to_xywh +from sam3.model.data_misc import BatchedDatapoint, convert_my_tensors, FindStage +from sam3.model.geometry_encoders import Prompt +from sam3.model.io_utils import IMAGE_EXTS, load_resource_as_video_frames +from sam3.model.sam3_tracker_utils import fill_holes_in_mask_scores +from sam3.model.sam3_video_base import MaskletConfirmationStatus, Sam3VideoBase +from sam3.model.utils.misc import copy_data_to_device +from sam3.perflib.compile import compile_wrapper, shape_logging_wrapper +from sam3.perflib.masks_ops import masks_to_boxes as perf_masks_to_boxes +from torchvision.ops import masks_to_boxes +from tqdm.auto import tqdm + +logger = get_logger(__name__) + + +class Sam3VideoInference(Sam3VideoBase): + TEXT_ID_FOR_TEXT = 0 + TEXT_ID_FOR_VISUAL = 1 + + def __init__( + self, + image_size=1008, + image_mean=(0.5, 0.5, 0.5), + image_std=(0.5, 0.5, 0.5), + compile_model=False, + **kwargs, + ): + """ + hotstart_delay: int, the delay (in #frames) before the model starts to yield output, 0 to disable hotstart delay. + hotstart_unmatch_thresh: int, remove the object if it has this many unmatched frames within its hotstart_delay period. + If `hotstart_delay` is set to 0, this parameter is ignored. + hotstart_dup_thresh: int, remove the object if it has overlapped with another object this many frames within its hotstart_delay period. 
+ """ + super().__init__(**kwargs) + self.image_size = image_size + self.image_mean = image_mean + self.image_std = image_std + self.compile_model = compile_model + + @torch.inference_mode() + def init_state( + self, + resource_path, + offload_video_to_cpu=False, + async_loading_frames=False, + video_loader_type="cv2", + ): + """Initialize an inference state from `resource_path` (an image or a video).""" + images, orig_height, orig_width = load_resource_as_video_frames( + resource_path=resource_path, + image_size=self.image_size, + offload_video_to_cpu=offload_video_to_cpu, + img_mean=self.image_mean, + img_std=self.image_std, + async_loading_frames=async_loading_frames, + video_loader_type=video_loader_type, + ) + inference_state = {} + inference_state["image_size"] = self.image_size + inference_state["num_frames"] = len(images) + # the original video height and width, used for resizing final output scores + inference_state["orig_height"] = orig_height + inference_state["orig_width"] = orig_width + # values that don't change across frames (so we only need to hold one copy of them) + inference_state["constants"] = {} + # inputs on each frame + self._construct_initial_input_batch(inference_state, images) + # initialize extra states + inference_state["tracker_inference_states"] = [] + inference_state["tracker_metadata"] = {} + inference_state["feature_cache"] = {} + inference_state["cached_frame_outputs"] = {} + inference_state["action_history"] = [] # for logging user actions + inference_state["is_image_only"] = is_image_type(resource_path) + return inference_state + + @torch.inference_mode() + def reset_state(self, inference_state): + """Revert `inference_state` to what it was right after initialization.""" + inference_state["input_batch"].find_text_batch[0] = "" + inference_state["text_prompt"] = None + for t in range(inference_state["num_frames"]): + inference_state["input_batch"].find_inputs[t].text_ids[...] 
= 0 + # constructing an output list in inference state (we start with an empty list) + inference_state["previous_stages_out"][t] = None + inference_state["per_frame_raw_point_input"][t] = None + inference_state["per_frame_raw_box_input"][t] = None + inference_state["per_frame_visual_prompt"][t] = None + inference_state["per_frame_geometric_prompt"][t] = None + inference_state["per_frame_cur_step"][t] = 0 + + inference_state["visual_prompt_embed"] = None + inference_state["visual_prompt_mask"] = None + inference_state["tracker_inference_states"].clear() + inference_state["tracker_metadata"].clear() + inference_state["feature_cache"].clear() + inference_state["cached_frame_outputs"].clear() + inference_state["action_history"].clear() # for logging user actions + + def _construct_initial_input_batch(self, inference_state, images): + """Construct an initial `BatchedDatapoint` instance as input.""" + # 1) img_batch + num_frames = len(images) + device = self.device + + # 2) find_text_batch + # "" will be replaced by the actual text prompt when adding prompts + find_text_batch = ["", "visual"] + + # 3) find_inputs + input_box_embedding_dim = 258 # historical default + input_points_embedding_dim = 257 # historical default + stages = [ + FindStage( + img_ids=[stage_id], + text_ids=[0], + input_boxes=[torch.zeros(input_box_embedding_dim)], + input_boxes_mask=[torch.empty(0, dtype=torch.bool)], + input_boxes_label=[torch.empty(0, dtype=torch.long)], + input_points=[torch.empty(0, input_points_embedding_dim)], + input_points_mask=[torch.empty(0)], + object_ids=[], + ) + for stage_id in range(num_frames) + ] + for i in range(len(stages)): + stages[i] = convert_my_tensors(stages[i]) + + # construct the final `BatchedDatapoint` and cast to GPU + input_batch = BatchedDatapoint( + img_batch=images, + find_text_batch=find_text_batch, + find_inputs=stages, + find_targets=[None] * num_frames, + find_metadatas=[None] * num_frames, + ) + input_batch = copy_data_to_device(input_batch, device, non_blocking=True) + inference_state["input_batch"] = input_batch + + # construct the placeholder interactive prompts and tracking queries + bs = 1 + inference_state["constants"]["empty_geometric_prompt"] = Prompt( + box_embeddings=torch.zeros(0, bs, 4, device=device), + box_mask=torch.zeros(bs, 0, device=device, dtype=torch.bool), + box_labels=torch.zeros(0, bs, device=device, dtype=torch.long), + point_embeddings=torch.zeros(0, bs, 2, device=device), + point_mask=torch.zeros(bs, 0, device=device, dtype=torch.bool), + point_labels=torch.zeros(0, bs, device=device, dtype=torch.long), + ) + + # constructing an output list in inference state (we start with an empty list) + inference_state["previous_stages_out"] = [None] * num_frames + inference_state["text_prompt"] = None + inference_state["per_frame_raw_point_input"] = [None] * num_frames + inference_state["per_frame_raw_box_input"] = [None] * num_frames + inference_state["per_frame_visual_prompt"] = [None] * num_frames + inference_state["per_frame_geometric_prompt"] = [None] * num_frames + inference_state["per_frame_cur_step"] = [0] * num_frames + + # placeholders for cached outputs + # (note: currently, a single visual prompt embedding is shared for all frames) + inference_state["visual_prompt_embed"] = None + inference_state["visual_prompt_mask"] = None + + def _get_visual_prompt(self, inference_state, frame_idx, boxes_cxcywh, box_labels): + """ + Handle the case of visual prompt. 
Currently, in the inference API we do not + explicitly distinguish between initial box as visual prompt vs subsequent boxes + or boxes after inference for refinement. + """ + # If the frame hasn't had any inference results before (prompting or propagation), + # we treat the first added box prompt as a visual prompt; otherwise, we treat + # the first box just as a refinement prompt. + is_new_visual_prompt = ( + inference_state["per_frame_visual_prompt"][frame_idx] is None + and inference_state["previous_stages_out"][frame_idx] is None + ) + if is_new_visual_prompt: + if boxes_cxcywh.size(0) != 1: + raise RuntimeError( + "visual prompts (box as an initial prompt) should only have one box, " + f"but got {boxes_cxcywh.shape=}" + ) + if not box_labels.item(): + logging.warning("A negative box is added as a visual prompt.") + # take the first box prompt as a visual prompt + device = self.device + new_visual_prompt = Prompt( + box_embeddings=boxes_cxcywh[None, 0:1, :].to(device), # (seq, bs, 4) + box_mask=None, + box_labels=box_labels[None, 0:1].to(device), # (seq, bs) + point_embeddings=None, + point_mask=None, + point_labels=None, + ) + inference_state["per_frame_visual_prompt"][frame_idx] = new_visual_prompt + else: + new_visual_prompt = None + + # `boxes_cxcywh` and `box_labels` contains all the raw box inputs added so far + # strip any visual prompt from the input boxes (for geometric prompt encoding) + if inference_state["per_frame_visual_prompt"][frame_idx] is not None: + boxes_cxcywh = boxes_cxcywh[1:] + box_labels = box_labels[1:] + + return boxes_cxcywh, box_labels, new_visual_prompt + + def _get_processing_order( + self, inference_state, start_frame_idx, max_frame_num_to_track, reverse + ): + num_frames = inference_state["num_frames"] + previous_stages_out = inference_state["previous_stages_out"] + if all(out is None for out in previous_stages_out) and start_frame_idx is None: + raise RuntimeError( + "No prompts are received on any frames. Please add prompt on at least one frame before propagation." + ) + # set start index, end index, and processing order + if start_frame_idx is None: + # default: start from the earliest frame with input points + start_frame_idx = min( + t for t, out in enumerate(previous_stages_out) if out is not None + ) + if max_frame_num_to_track is None: + # default: track all the frames in the video + max_frame_num_to_track = num_frames + if reverse: + end_frame_idx = start_frame_idx - max_frame_num_to_track + end_frame_idx = max(end_frame_idx, 0) + processing_order = range(start_frame_idx - 1, end_frame_idx - 1, -1) + else: + end_frame_idx = start_frame_idx + max_frame_num_to_track + end_frame_idx = min(end_frame_idx, num_frames - 1) + processing_order = range(start_frame_idx, end_frame_idx + 1) + return processing_order, end_frame_idx + + @torch.inference_mode() + def propagate_in_video( + self, + inference_state, + start_frame_idx=None, + max_frame_num_to_track=None, + reverse=False, + ): + """ + Propagate the prompts to get grounding results for the entire video. This method + is a generator and yields inference outputs for all frames in the range specified + by `start_frame_idx`, `max_frame_num_to_track`, and `reverse`. 
+ """ + # compile the model (it's a no-op if the model is already compiled) + # note that it's intentionally added to `self.propagate_in_video`, so that the first + # `self.add_prompt` call will be done in eager mode to fill in the decoder buffers + # such as positional encoding cache) + self._compile_model() + + processing_order, end_frame_idx = self._get_processing_order( + inference_state, + start_frame_idx, + max_frame_num_to_track, + reverse=reverse, + ) + + # Store max_frame_num_to_track in feature_cache for downstream methods + inference_state["feature_cache"]["tracking_bounds"] = { + "max_frame_num_to_track": max_frame_num_to_track, + "propagate_in_video_start_frame_idx": start_frame_idx, + } + + hotstart_buffer = [] + hotstart_removed_obj_ids = set() + # when deciding whether to output a masklet on `yield_frame_idx`, we check whether the object is confirmed + # in a future frame (`unconfirmed_frame_delay` frames after the current frame). For example, if we require + # an object to be detected in 3 consecutive frames to be confirmed, then we look 2 frames in the future -- + # e.g., we output an object on frame 4 only if it becomes confirmed on frame 6. + unconfirmed_status_delay = self.masklet_confirmation_consecutive_det_thresh - 1 + unconfirmed_obj_ids_per_frame = {} # frame_idx -> hidden_obj_ids + for frame_idx in tqdm( + processing_order, desc="propagate_in_video", disable=self.rank > 0 + ): + out = self._run_single_frame_inference(inference_state, frame_idx, reverse) + + if self.hotstart_delay > 0: + # accumulate the outputs for the first `hotstart_delay` frames + hotstart_buffer.append([frame_idx, out]) + # update the object IDs removed by hotstart so that we don't output them + if self.rank == 0: + hotstart_removed_obj_ids.update(out["removed_obj_ids"]) + unconfirmed_obj_ids = out.get("unconfirmed_obj_ids", None) + if unconfirmed_obj_ids is not None: + unconfirmed_obj_ids_per_frame[frame_idx] = unconfirmed_obj_ids + + if frame_idx == end_frame_idx: + # we reached the end of propagation -- yield all frames in the buffer + yield_list = hotstart_buffer + hotstart_buffer = [] + elif len(hotstart_buffer) >= self.hotstart_delay: + # we have enough frames -- yield and remove the first (oldest) frame from the buffer + yield_list = hotstart_buffer[:1] + hotstart_buffer = hotstart_buffer[1:] + else: + # not enough frames yet -- skip yielding + yield_list = [] + else: + yield_list = [(frame_idx, out)] # output the current frame + + for yield_frame_idx, yield_out in yield_list: + # post-process the output and yield it + if self.rank == 0: + suppressed_obj_ids = yield_out["suppressed_obj_ids"] + unconfirmed_status_frame_idx = ( + yield_frame_idx + unconfirmed_status_delay + if not reverse + else yield_frame_idx - unconfirmed_status_delay + ) + + # Clamp the frame index to stay within video bounds + num_frames = inference_state["num_frames"] + unconfirmed_status_frame_idx = max( + 0, min(unconfirmed_status_frame_idx, num_frames - 1) + ) + + unconfirmed_obj_ids = unconfirmed_obj_ids_per_frame.get( + unconfirmed_status_frame_idx, None + ) + postprocessed_out = self._postprocess_output( + inference_state, + yield_out, + hotstart_removed_obj_ids, + suppressed_obj_ids, + unconfirmed_obj_ids, + ) + + self._cache_frame_outputs( + inference_state, + yield_frame_idx, + yield_out["obj_id_to_mask"], + suppressed_obj_ids=suppressed_obj_ids, + removed_obj_ids=hotstart_removed_obj_ids, + unconfirmed_obj_ids=unconfirmed_obj_ids, + ) + else: + postprocessed_out = None # no output on other GPUs + yield 
yield_frame_idx, postprocessed_out + + def _run_single_frame_inference(self, inference_state, frame_idx, reverse): + """ + Perform inference on a single frame and get its inference results. This would + also update `inference_state`. + """ + # prepare inputs + input_batch = inference_state["input_batch"] + tracker_states_local = inference_state["tracker_inference_states"] + has_text_prompt = inference_state["text_prompt"] is not None + has_geometric_prompt = ( + inference_state["per_frame_geometric_prompt"][frame_idx] is not None + ) + # run inference for the current frame + ( + obj_id_to_mask, + obj_id_to_score, + tracker_states_local_new, + tracker_metadata_new, + frame_stats, + _, + ) = self._det_track_one_frame( + frame_idx=frame_idx, + num_frames=inference_state["num_frames"], + reverse=reverse, + input_batch=input_batch, + geometric_prompt=( + inference_state["constants"]["empty_geometric_prompt"] + if not has_geometric_prompt + else inference_state["per_frame_geometric_prompt"][frame_idx] + ), + tracker_states_local=tracker_states_local, + tracker_metadata_prev=inference_state["tracker_metadata"], + feature_cache=inference_state["feature_cache"], + orig_vid_height=inference_state["orig_height"], + orig_vid_width=inference_state["orig_width"], + is_image_only=inference_state["is_image_only"], + allow_new_detections=has_text_prompt or has_geometric_prompt, + ) + # update inference state + inference_state["tracker_inference_states"] = tracker_states_local_new + inference_state["tracker_metadata"] = tracker_metadata_new + # use a dummy string in "previous_stages_out" to indicate this frame has outputs + inference_state["previous_stages_out"][frame_idx] = "_THIS_FRAME_HAS_OUTPUTS_" + + if self.rank == 0: + self._cache_frame_outputs(inference_state, frame_idx, obj_id_to_mask) + + out = { + "obj_id_to_mask": obj_id_to_mask, + "obj_id_to_score": obj_id_to_score, # first frame detection score + "obj_id_to_tracker_score": tracker_metadata_new[ + "obj_id_to_tracker_score_frame_wise" + ][frame_idx], + } + # removed_obj_ids is only needed on rank 0 to handle hotstart delay buffer + if self.rank == 0: + rank0_metadata = tracker_metadata_new["rank0_metadata"] + removed_obj_ids = rank0_metadata["removed_obj_ids"] + out["removed_obj_ids"] = removed_obj_ids + out["suppressed_obj_ids"] = rank0_metadata["suppressed_obj_ids"][frame_idx] + out["frame_stats"] = frame_stats + if self.masklet_confirmation_enable: + status = rank0_metadata["masklet_confirmation"]["status"] + is_unconfirmed = status == MaskletConfirmationStatus.UNCONFIRMED.value + out["unconfirmed_obj_ids"] = tracker_metadata_new["obj_ids_all_gpu"][ + is_unconfirmed + ].tolist() + else: + out["unconfirmed_obj_ids"] = [] + + return out + + def _postprocess_output( + self, + inference_state, + out, + removed_obj_ids=None, + suppressed_obj_ids=None, + unconfirmed_obj_ids=None, + ): + obj_id_to_mask = out["obj_id_to_mask"] # low res masks + curr_obj_ids = sorted(obj_id_to_mask.keys()) + H_video, W_video = inference_state["orig_height"], inference_state["orig_width"] + if len(curr_obj_ids) == 0: + out_obj_ids = torch.zeros(0, dtype=torch.int64) + out_probs = torch.zeros(0, dtype=torch.float32) + out_binary_masks = torch.zeros(0, H_video, W_video, dtype=torch.bool) + out_boxes_xywh = torch.zeros(0, 4, dtype=torch.float32) + else: + out_obj_ids = torch.tensor(curr_obj_ids, dtype=torch.int64) + out_probs = torch.tensor( + [out["obj_id_to_score"][obj_id] for obj_id in curr_obj_ids] + ) + out_tracker_probs = torch.tensor( + [ + ( + 
out["obj_id_to_tracker_score"][obj_id] + if obj_id in out["obj_id_to_tracker_score"] + else 0.0 + ) + for obj_id in curr_obj_ids + ] + ) + out_binary_masks = torch.cat( + [obj_id_to_mask[obj_id] for obj_id in curr_obj_ids], dim=0 + ) + + assert out_binary_masks.dtype == torch.bool + keep = out_binary_masks.any(dim=(1, 2)).cpu() # remove masks with 0 areas + # hide outputs for those object IDs in `obj_ids_to_hide` + obj_ids_to_hide = [] + if suppressed_obj_ids is not None: + obj_ids_to_hide.extend(suppressed_obj_ids) + if removed_obj_ids is not None: + obj_ids_to_hide.extend(removed_obj_ids) + if unconfirmed_obj_ids is not None: + obj_ids_to_hide.extend(unconfirmed_obj_ids) + if len(obj_ids_to_hide) > 0: + obj_ids_to_hide_t = torch.tensor(obj_ids_to_hide, dtype=torch.int64) + keep &= ~torch.isin(out_obj_ids, obj_ids_to_hide_t) + + # slice those valid entries from the original outputs + keep_idx = torch.nonzero(keep, as_tuple=True)[0] + keep_idx_gpu = keep_idx.pin_memory().to( + device=out_binary_masks.device, non_blocking=True + ) + + out_obj_ids = torch.index_select(out_obj_ids, 0, keep_idx) + out_probs = torch.index_select(out_probs, 0, keep_idx) + out_tracker_probs = torch.index_select(out_tracker_probs, 0, keep_idx) + out_binary_masks = torch.index_select(out_binary_masks, 0, keep_idx_gpu) + + if perflib.is_enabled: + out_boxes_xyxy = perf_masks_to_boxes( + out_binary_masks, out_obj_ids.tolist() + ) + else: + out_boxes_xyxy = masks_to_boxes(out_binary_masks) + + out_boxes_xywh = box_xyxy_to_xywh(out_boxes_xyxy) # convert to xywh format + # normalize boxes + out_boxes_xywh[..., 0] /= W_video + out_boxes_xywh[..., 1] /= H_video + out_boxes_xywh[..., 2] /= W_video + out_boxes_xywh[..., 3] /= H_video + + # apply non-overlapping constraints on the existing masklets + if out_binary_masks.shape[0] > 1: + assert len(out_binary_masks) == len(out_tracker_probs) + out_binary_masks = ( + self.tracker._apply_object_wise_non_overlapping_constraints( + out_binary_masks.unsqueeze(1), + out_tracker_probs.unsqueeze(1).to(out_binary_masks.device), + background_value=0, + ).squeeze(1) + ) > 0 + + outputs = { + "out_obj_ids": out_obj_ids.cpu().numpy(), + "out_probs": out_probs.cpu().numpy(), + "out_boxes_xywh": out_boxes_xywh.cpu().numpy(), + "out_binary_masks": out_binary_masks.cpu().numpy(), + "frame_stats": out.get("frame_stats", None), + } + return outputs + + def _cache_frame_outputs( + self, + inference_state, + frame_idx, + obj_id_to_mask, + suppressed_obj_ids=None, + removed_obj_ids=None, + unconfirmed_obj_ids=None, + ): + # Filter out suppressed, removed, and unconfirmed objects from the cache + filtered_obj_id_to_mask = obj_id_to_mask.copy() + + objects_to_exclude = set() + if suppressed_obj_ids is not None: + objects_to_exclude.update(suppressed_obj_ids) + if removed_obj_ids is not None: + objects_to_exclude.update(removed_obj_ids) + if unconfirmed_obj_ids is not None: + objects_to_exclude.update(unconfirmed_obj_ids) + + if objects_to_exclude: + for obj_id in objects_to_exclude: + if obj_id in filtered_obj_id_to_mask: + del filtered_obj_id_to_mask[obj_id] + + inference_state["cached_frame_outputs"][frame_idx] = filtered_obj_id_to_mask + + def _build_tracker_output( + self, inference_state, frame_idx, refined_obj_id_to_mask=None + ): + assert ( + "cached_frame_outputs" in inference_state + and frame_idx in inference_state["cached_frame_outputs"] + ), "No cached outputs found. Ensure normal propagation has run first to populate the cache." 
+ cached_outputs = inference_state["cached_frame_outputs"][frame_idx] + + obj_id_to_mask = cached_outputs.copy() + + # Update with refined masks if provided + if refined_obj_id_to_mask is not None: + for obj_id, refined_mask in refined_obj_id_to_mask.items(): + assert ( + refined_mask is not None + ), f"Refined mask data must be provided for obj_id {obj_id}" + obj_id_to_mask[obj_id] = refined_mask + + return obj_id_to_mask + + def _compile_model(self): + """Compile the SAM model with torch.compile for speedup.""" + is_compiled = getattr(self, "_model_is_compiled", False) + if is_compiled or not self.compile_model: + return + + import torch._dynamo + + # a larger cache size to hold varying number of shapes for torch.compile + # see https://github.com/pytorch/pytorch/blob/v2.5.1/torch/_dynamo/config.py#L42-L49 + torch._dynamo.config.cache_size_limit = 128 + torch._dynamo.config.accumulated_cache_size_limit = 2048 + torch._dynamo.config.capture_scalar_outputs = True + torch._dynamo.config.suppress_errors = True + + # Compile module components + # skip compilation of `_encode_prompt` since it sometimes tiggger SymInt errors + # self._encode_prompt = clone_output_wrapper( + # torch.compile(self._encode_prompt, fullgraph=True, mode="max-autotune") + # ) + + ## Compile SAM3 model components + self.detector.backbone.vision_backbone.forward = clone_output_wrapper( + torch.compile( + self.detector.backbone.vision_backbone.forward, + fullgraph=True, + mode="max-autotune", + ) + ) + self.detector.transformer.encoder.forward = clone_output_wrapper( + torch.compile( + self.detector.transformer.encoder.forward, + fullgraph=True, + mode="max-autotune", + ) + ) + self.detector.transformer.decoder.forward = clone_output_wrapper( + torch.compile( + self.detector.transformer.decoder.forward, + fullgraph=True, + mode="max-autotune", + dynamic=False, + ) + ) + + self.detector.segmentation_head.forward = clone_output_wrapper( + torch.compile( + self.detector.segmentation_head.forward, + fullgraph=True, + mode="max-autotune", + ) + ) + + ## Compile Tracker model components + self.tracker.maskmem_backbone.forward = compile_wrapper( + self.tracker.maskmem_backbone.forward, + mode="max-autotune", + fullgraph=True, + dynamic=False, + ) + + self.tracker.transformer.encoder.forward = shape_logging_wrapper( + compile_wrapper( + self.tracker.transformer.encoder.forward, + mode="max-autotune-no-cudagraphs", + fullgraph=True, + dynamic=True, + ), + keep_kwargs=["src", "src_pos", "prompt", "prompt_pos"], + ) + + self.tracker.sam_mask_decoder.forward = compile_wrapper( + self.tracker.sam_mask_decoder.forward, + mode="max-autotune", + fullgraph=True, + dynamic=False, # Accuracy regression on True + ) + + self._model_is_compiled = True + + def _warm_up_vg_propagation(self, inference_state, start_frame_idx=0): + # use different tracking score thresholds for each round to simulate different number of output objects + num_objects_list = range(self.num_obj_for_compile + 1) + new_det_score_thresh_list = [0.3, 0.5, 0.7] + num_rounds = len(new_det_score_thresh_list) + orig_new_det_thresh = self.new_det_thresh + + for i, thresh in enumerate(new_det_score_thresh_list): + self.new_det_thresh = thresh + for num_objects in num_objects_list: + logger.info(f"{i+1}/{num_rounds} warming up model compilation") + self.add_prompt( + inference_state, frame_idx=start_frame_idx, text_str="cat" + ) + logger.info( + f"{i+1}/{num_rounds} warming up model compilation -- simulating {num_objects}/{self.num_obj_for_compile} objects" + ) + inference_state 
= self.add_fake_objects_to_inference_state( + inference_state, num_objects, frame_idx=start_frame_idx + ) + inference_state["tracker_metadata"]["rank0_metadata"].update( + { + "masklet_confirmation": { + "status": np.zeros(num_objects, dtype=np.int64), + "consecutive_det_num": np.zeros( + num_objects, dtype=np.int64 + ), + } + } + ) + for _ in self.propagate_in_video( + inference_state, start_frame_idx, reverse=False + ): + pass + for _ in self.propagate_in_video( + inference_state, start_frame_idx, reverse=True + ): + pass + self.reset_state(inference_state) + logger.info( + f"{i+1}/{num_rounds} warming up model compilation -- completed round {i+1} out of {num_rounds}" + ) + + # Warm up Tracker memory encoder with varying input shapes + num_iters = 3 + feat_size = self.tracker.sam_image_embedding_size**2 # 72 * 72 = 5184 + hidden_dim = self.tracker.hidden_dim # 256 + mem_dim = self.tracker.mem_dim # 64 + for _ in tqdm(range(num_iters)): + for b in range(1, self.num_obj_for_compile + 1): + for i in range( + 1, + self.tracker.max_cond_frames_in_attn + self.tracker.num_maskmem, + ): + for j in range( + self.tracker.max_cond_frames_in_attn + + self.tracker.max_obj_ptrs_in_encoder + ): + num_obj_ptr_tokens = (hidden_dim // mem_dim) * j + src = torch.randn(feat_size, b, hidden_dim, device=self.device) + src_pos = torch.randn( + feat_size, b, hidden_dim, device=self.device + ) + prompt = torch.randn( + feat_size * i + num_obj_ptr_tokens, + b, + mem_dim, + device=self.device, + ) + prompt_pos = torch.randn( + feat_size * i + num_obj_ptr_tokens, + b, + mem_dim, + device=self.device, + ) + + self.tracker.transformer.encoder.forward( + src=src, + src_pos=src_pos, + prompt=prompt, + prompt_pos=prompt_pos, + num_obj_ptr_tokens=num_obj_ptr_tokens, + ) + + self.new_det_thresh = orig_new_det_thresh + return inference_state + + def add_fake_objects_to_inference_state( + self, inference_state, num_objects, frame_idx + ): + new_det_obj_ids_local = np.arange(num_objects) + high_res_H, high_res_W = ( + self.tracker.maskmem_backbone.mask_downsampler.interpol_size + ) + new_det_masks = torch.ones( + len(new_det_obj_ids_local), high_res_H, high_res_W + ).to(self.device) + + inference_state["tracker_inference_states"] = self._tracker_add_new_objects( + frame_idx=frame_idx, + num_frames=inference_state["num_frames"], + new_obj_ids=new_det_obj_ids_local, + new_obj_masks=new_det_masks, + tracker_states_local=inference_state["tracker_inference_states"], + orig_vid_height=inference_state["orig_height"], + orig_vid_width=inference_state["orig_width"], + feature_cache=inference_state["feature_cache"], + ) + + # Synthesize obj_id_to_mask data for cached_frame_outputs to support _build_tracker_output during warmup + obj_id_to_mask = {} + if num_objects > 0: + H_video = inference_state["orig_height"] + W_video = inference_state["orig_width"] + + video_res_masks = F.interpolate( + new_det_masks.unsqueeze(1), # Add channel dimension for interpolation + size=(H_video, W_video), + mode="bilinear", + align_corners=False, + ) # (num_objects, 1, H_video, W_video) + for i, obj_id in enumerate(new_det_obj_ids_local): + obj_id_to_mask[obj_id] = (video_res_masks[i] > 0.0).to(torch.bool) + if self.rank == 0: + for fidx in range(inference_state["num_frames"]): + self._cache_frame_outputs(inference_state, fidx, obj_id_to_mask) + + inference_state["tracker_metadata"].update( + { + "obj_ids_per_gpu": [np.arange(num_objects)], + "obj_ids_all_gpu": np.arange(num_objects), # Same as 1 GPU + "num_obj_per_gpu": [num_objects], + 
"obj_id_to_score": {i: 1.0 for i in range(num_objects)}, + "max_obj_id": num_objects, + "rank0_metadata": { + "masklet_confirmation": { + "status": np.zeros(num_objects, dtype=np.int64), + "consecutive_det_num": np.zeros(num_objects, dtype=np.int64), + }, + "removed_obj_ids": set(), + "suppressed_obj_ids": defaultdict(set), + }, + } + ) + return inference_state + + @torch.inference_mode() + @torch.autocast(device_type="cuda", dtype=torch.bfloat16) + def warm_up_compilation(self): + """ + Warm up the model by running a dummy inference to compile the model. This is + useful to avoid the compilation overhead in the first inference call. + """ + if not self.compile_model: + return + self._warm_up_complete = False + if self.device.type != "cuda": + raise RuntimeError( + f"The model must be on CUDA for warm-up compilation, got {self.device=}." + ) + + # temporally set to single GPU temporarily for warm-up compilation + orig_rank = self.rank + orig_world_size = self.world_size + self.rank = self.detector.rank = 0 + self.world_size = self.detector.world_size = 1 + orig_recondition_every_nth_frame = self.recondition_every_nth_frame + # self.recondition_every_nth_frame = 2 + + # Get a random video + inference_state = self.init_state(resource_path="") + start_frame_idx = 0 + + # Run basic propagation warm-up + inference_state = self._warm_up_vg_propagation(inference_state, start_frame_idx) + + logger.info("Warm-up compilation completed.") + + # revert to the original GPU and rank + self.rank = self.detector.rank = orig_rank + self.world_size = self.detector.world_size = orig_world_size + self.recondition_every_nth_frame = orig_recondition_every_nth_frame + self._warm_up_complete = True + self.tracker.transformer.encoder.forward.set_logging(True) + + @torch.inference_mode() + def add_prompt( + self, + inference_state, + frame_idx, + text_str=None, + boxes_xywh=None, + box_labels=None, + ): + """ + Add text, point or box prompts on a single frame. This method returns the inference + outputs only on the prompted frame. + + Note that text prompts are NOT associated with a particular frame (i.e. they apply + to all frames). However, we only run inference on the frame specified in `frame_idx`. + """ + logger.debug("Running add_prompt on frame %d", frame_idx) + + num_frames = inference_state["num_frames"] + assert ( + text_str is not None or boxes_xywh is not None + ), "at least one type of prompt (text, boxes) must be provided" + assert ( + 0 <= frame_idx < num_frames + ), f"{frame_idx=} is out of range for a total of {num_frames} frames" + + # since it's a semantic prompt, we start over + self.reset_state(inference_state) + + # 1) add text prompt + if text_str is not None and text_str != "visual": + inference_state["text_prompt"] = text_str + inference_state["input_batch"].find_text_batch[0] = text_str + text_id = self.TEXT_ID_FOR_TEXT + else: + inference_state["text_prompt"] = None + inference_state["input_batch"].find_text_batch[0] = "" + text_id = self.TEXT_ID_FOR_VISUAL + for t in range(inference_state["num_frames"]): + inference_state["input_batch"].find_inputs[t].text_ids[...] 
= text_id + + # 2) handle box prompt + assert (boxes_xywh is not None) == (box_labels is not None) + if boxes_xywh is not None: + boxes_xywh = torch.as_tensor(boxes_xywh, dtype=torch.float32) + box_labels = torch.as_tensor(box_labels, dtype=torch.long) + # input boxes are expected to be [xmin, ymin, width, height] format + # in normalized coordinates of range 0~1, similar to FA + assert boxes_xywh.dim() == 2 + assert boxes_xywh.size(0) > 0 and boxes_xywh.size(-1) == 4 + assert box_labels.dim() == 1 and box_labels.size(0) == boxes_xywh.size(0) + boxes_cxcywh = box_xywh_to_cxcywh(boxes_xywh) + assert (boxes_xywh >= 0).all().item() and (boxes_xywh <= 1).all().item() + assert (boxes_cxcywh >= 0).all().item() and (boxes_cxcywh <= 1).all().item() + + new_box_input = boxes_cxcywh, box_labels + inference_state["per_frame_raw_box_input"][frame_idx] = new_box_input + + # handle the case of visual prompt (also added as an input box from the UI) + boxes_cxcywh, box_labels, geometric_prompt = self._get_visual_prompt( + inference_state, frame_idx, boxes_cxcywh, box_labels + ) + + inference_state["per_frame_geometric_prompt"][frame_idx] = geometric_prompt + + out = self._run_single_frame_inference( + inference_state, frame_idx, reverse=False + ) + return frame_idx, self._postprocess_output(inference_state, out) + + @torch.autocast(device_type="cuda", dtype=torch.bfloat16) + def forward(self, input: BatchedDatapoint, is_inference: bool = False): + """This method is only used for benchmark eval (not used in the demo).""" + # set the model to single GPU for benchmark evaluation (to be compatible with trainer) + orig_rank = self.rank + orig_world_size = self.world_size + self.rank = self.detector.rank = 0 + self.world_size = self.detector.world_size = 1 + + # get data + text_prompt_ids = input.find_metadatas[0].original_category_id + text_prompt_list = input.find_text_batch + + # loop over txt prompts + tracking_res = defaultdict(dict) # frame_idx --> {obj_id: mask} + scores_labels = defaultdict(tuple) # obj_id --> (score, text_prompt_id) + inference_state = self.init_state(resource_path=input.raw_images) + for prompt_id, prompt in zip(text_prompt_ids, text_prompt_list): + self.add_prompt(inference_state, frame_idx=0, text_str=prompt) + start_obj_id = max(scores_labels.keys(), default=-1) + 1 # prev max + 1 + + # propagate the prompts + obj_ids_this_prompt = set() + for frame_idx, out in self.propagate_in_video( + inference_state, + start_frame_idx=0, + max_frame_num_to_track=inference_state["num_frames"], + reverse=False, + ): + current_frame_res = tracking_res[frame_idx] + for obj_id, mask in zip(out["out_obj_ids"], out["out_binary_masks"]): + mask_tensor = torch.tensor(mask[None], dtype=torch.bool) + current_frame_res[obj_id + start_obj_id] = mask_tensor + obj_ids_this_prompt.update(current_frame_res.keys()) + + obj_id_to_score = inference_state["tracker_metadata"]["obj_id_to_score"] + for obj_id, score in obj_id_to_score.items(): + if obj_id + start_obj_id in obj_ids_this_prompt: + score_tensor = torch.tensor(score, dtype=torch.float32) + scores_labels[obj_id + start_obj_id] = (score_tensor, prompt_id) + + self.reset_state(inference_state) + + video_id = input.find_metadatas[0].original_image_id[0].cpu().item() + preds = self.prep_for_evaluator(input.raw_images, tracking_res, scores_labels) + + # revert the model to the original GPU and rank + self.rank = self.detector.rank = orig_rank + self.world_size = self.detector.world_size = orig_world_size + return {video_id: preds} + + def back_convert(self, 
targets): + # Needed for retraining compatibility with trainer + return targets + + +class Sam3VideoInferenceWithInstanceInteractivity(Sam3VideoInference): + def __init__( + self, + use_prev_mem_frame=False, + use_stateless_refinement=False, + refinement_detector_cond_frame_removal_window=16, + **kwargs, + ): + """ + use_prev_mem_frame: bool, whether to condition on previous memory frames for adding points + use_stateless_refinement: bool, whether to enable stateless refinement behavior + refinement_detector_cond_frame_removal_window: int, we remove a detector conditioning frame if it + is within this many frames of a user refined frame. Set to a large value (e.g. 10000) to + always remove detector conditioning frames if there is any user refinement in the video. + """ + super().__init__(**kwargs) + self.use_prev_mem_frame = use_prev_mem_frame + self.use_stateless_refinement = use_stateless_refinement + self.refinement_detector_cond_frame_removal_window = ( + refinement_detector_cond_frame_removal_window + ) + + def _init_new_tracker_state(self, inference_state): + return self.tracker.init_state( + cached_features=inference_state["feature_cache"], + video_height=inference_state["orig_height"], + video_width=inference_state["orig_width"], + num_frames=inference_state["num_frames"], + ) + + @torch.inference_mode() + def propagate_in_video( + self, + inference_state, + start_frame_idx=None, + max_frame_num_to_track=None, + reverse=False, + ): + # step 1: check which type of propagation to run, should be the same for all GPUs. + propagation_type, obj_ids = self.parse_action_history_for_propagation( + inference_state + ) + self.add_action_history( + inference_state, + action_type=propagation_type, + obj_ids=obj_ids, + frame_idx=start_frame_idx, + ) + + # step 2: run full VG propagation + if propagation_type == "propagation_full": + logger.debug(f"Running full VG propagation (reverse={reverse}).") + yield from super().propagate_in_video( + inference_state, + start_frame_idx=start_frame_idx, + max_frame_num_to_track=max_frame_num_to_track, + reverse=reverse, + ) + return + + # step 3: run Tracker partial propagation or direct fetch existing predictions + assert propagation_type in ["propagation_partial", "propagation_fetch"] + logger.debug( + f"Running Tracker propagation for objects {obj_ids} and merging it with existing VG predictions (reverse={reverse})." + if propagation_type == "propagation_partial" + else f"Fetching existing VG predictions without running any propagation (reverse={reverse})." 
+ ) + processing_order, _ = self._get_processing_order( + inference_state, + start_frame_idx=start_frame_idx, + max_frame_num_to_track=max_frame_num_to_track, + reverse=reverse, + ) + + tracker_metadata = inference_state["tracker_metadata"] + + # if fetch just return from output + if propagation_type == "propagation_fetch": + for frame_idx in tqdm(processing_order): + if self.rank == 0: + obj_id_to_mask = inference_state["cached_frame_outputs"].get( + frame_idx, {} + ) + # post processing - remove suppressed obj_ids + obj_id_to_score = tracker_metadata["obj_id_to_score"] + suppressed_obj_ids = tracker_metadata["rank0_metadata"][ + "suppressed_obj_ids" + ][frame_idx] + obj_id_to_tracker_score = tracker_metadata[ + "obj_id_to_tracker_score_frame_wise" + ][frame_idx] + + out = { + "obj_id_to_mask": obj_id_to_mask, + "obj_id_to_score": obj_id_to_score, + "obj_id_to_tracker_score": obj_id_to_tracker_score, + } + yield ( + frame_idx, + self._postprocess_output( + inference_state, out, suppressed_obj_ids=suppressed_obj_ids + ), + ) + else: + yield frame_idx, None + + return + + # get Tracker inference states containing selected obj_ids + if propagation_type == "propagation_partial": + # can be empty for GPUs where objects are not in their inference states + tracker_states_local = self._get_tracker_inference_states_by_obj_ids( + inference_state, obj_ids + ) + for tracker_state in tracker_states_local: + self.tracker.propagate_in_video_preflight( + tracker_state, run_mem_encoder=True + ) + + for frame_idx in tqdm(processing_order): + # run Tracker propagation + if propagation_type == "propagation_partial": + self._prepare_backbone_feats(inference_state, frame_idx, reverse) + obj_ids_local, low_res_masks_local, tracker_scores_local = ( + self._propogate_tracker_one_frame_local_gpu( + tracker_states_local, + frame_idx=frame_idx, + reverse=reverse, + run_mem_encoder=True, + ) + ) + + # broadcast refined object tracker scores and masks to all GPUs + # handle multiple objects that can be located on different GPUs + refined_obj_data = {} # obj_id -> (score, mask_video_res) + + # Collect data for objects on this GPU + local_obj_data = {} + for obj_id in obj_ids: + obj_rank = self._get_gpu_id_by_obj_id(inference_state, obj_id) + if self.rank == obj_rank and obj_id in obj_ids_local: + refined_obj_idx = obj_ids_local.index(obj_id) + refined_mask_low_res = low_res_masks_local[ + refined_obj_idx + ] # (H_low_res, W_low_res) + refined_score = tracker_scores_local[refined_obj_idx] + + # Keep low resolution for broadcasting to reduce communication cost + local_obj_data[obj_id] = (refined_score, refined_mask_low_res) + + # Broadcast data from each GPU that has refined objects + if self.world_size > 1: + for obj_id in obj_ids: + obj_rank = self._get_gpu_id_by_obj_id(inference_state, obj_id) + if self.rank == obj_rank: + # This GPU has the object, broadcast its data + data_to_broadcast = local_obj_data.get(obj_id, None) + data_list = [ + (data_to_broadcast[0].cpu(), data_to_broadcast[1].cpu()) + ] + self.broadcast_python_obj_cpu(data_list, src=obj_rank) + if data_to_broadcast is not None: + refined_obj_data[obj_id] = data_to_broadcast + elif self.rank != obj_rank: + # This GPU doesn't have the object, receive data + data_list = [None] + self.broadcast_python_obj_cpu(data_list, src=obj_rank) + refined_obj_data[obj_id] = ( + data_list[0][0].to(self.device), + data_list[0][1].to(self.device), + ) + else: + # Single GPU case + refined_obj_data = local_obj_data + + # Update Tracker scores for all refined objects + for 
obj_id, (refined_score, _) in refined_obj_data.items(): + tracker_metadata["obj_id_to_tracker_score_frame_wise"][ + frame_idx + ].update({obj_id: refined_score.item()}) + + if self.rank == 0: + # get predictions from Tracker inference states, it includes the original + # VG predictions and the refined predictions from interactivity. + + # Prepare refined masks dictionary - upscale to video resolution after broadcast + refined_obj_id_to_mask = {} + for obj_id, (_, refined_mask_low_res) in refined_obj_data.items(): + refined_mask_video_res = ( + self._convert_low_res_mask_to_video_res( + refined_mask_low_res, inference_state + ) + ) # (1, H_video, W_video) bool + refined_obj_id_to_mask[obj_id] = refined_mask_video_res + + obj_id_to_mask = self._build_tracker_output( + inference_state, frame_idx, refined_obj_id_to_mask + ) + out = { + "obj_id_to_mask": obj_id_to_mask, + "obj_id_to_score": tracker_metadata["obj_id_to_score"], + "obj_id_to_tracker_score": tracker_metadata[ + "obj_id_to_tracker_score_frame_wise" + ][frame_idx], + } + suppressed_obj_ids = tracker_metadata["rank0_metadata"][ + "suppressed_obj_ids" + ][frame_idx] + self._cache_frame_outputs( + inference_state, + frame_idx, + obj_id_to_mask, + suppressed_obj_ids=suppressed_obj_ids, + ) + suppressed_obj_ids = tracker_metadata["rank0_metadata"][ + "suppressed_obj_ids" + ][frame_idx] + yield ( + frame_idx, + self._postprocess_output( + inference_state, out, suppressed_obj_ids=suppressed_obj_ids + ), + ) + else: + yield frame_idx, None + + def add_action_history( + self, inference_state, action_type, frame_idx=None, obj_ids=None + ): + """ + action_history is used to automatically decide what to do during propagation. + action_type: one of ["add", "remove", "refine"] + ["propagation_full", "propagation_partial", "propagation_fetch"] + """ + instance_actions = ["add", "remove", "refine"] + propagation_actions = [ + "propagation_full", + "propagation_partial", + "propagation_fetch", + ] + assert ( + action_type in instance_actions + propagation_actions + ), f"Invalid action type: {action_type}, must be one of {instance_actions + propagation_actions}" + action = { + "type": action_type, + "frame_idx": frame_idx, + "obj_ids": obj_ids, + } + inference_state["action_history"].append(action) + + def _has_object_been_refined(self, inference_state, obj_id): + action_history = inference_state["action_history"] + for action in action_history: + if action["type"] in ["add", "refine"] and action.get("obj_ids"): + if obj_id in action["obj_ids"]: + return True + return False + + def parse_action_history_for_propagation(self, inference_state): + """ + Parse the actions in history before the last propagation and prepare for the next propagation. + We support multiple actions (add/remove/refine) between two propagations. If we had an action + history similar to this ["propagate", "add", "refine", "remove", "add"], the next propagation + would remove the removed object, and also propagate the two added/refined objects. + + Returns: + propagation_type: one of ["propagation_full", "propagation_partial", "propagation_fetch"] + - "propagation_full": run VG propagation for all objects + - "propagation_partial": run Tracker propagation for selected objects, useful for add/refine actions + - "propagation_fetch": fetch existing VG predictions without running any propagation + obj_ids: list of object ids to run Tracker propagation on if propagation_type is "propagation_partial". 
+ """ + action_history = inference_state["action_history"] + if len(action_history) == 0: + # we run propagation for the first time + return "propagation_full", None + + if "propagation" in action_history[-1]["type"]: + if action_history[-1]["type"] in ["propagation_fetch"]: + # last propagation is direct fetch, we fetch existing predictions + return "propagation_fetch", None + elif action_history[-1]["type"] in [ + "propagation_partial", + "propagation_full", + ]: + # we do fetch prediction if we have already run propagation twice or we have run + # propagation once and it is from the first frame or last frame. + if ( + len(action_history) > 1 + and action_history[-2]["type"] + in ["propagation_partial", "propagation_full"] + ) or action_history[-1]["frame_idx"] in [ + 0, + inference_state["num_frames"] - 1, + ]: + # we have run both forward and backward partial/full propagation + return "propagation_fetch", None + else: + # we have run partial/full forward or backward propagation once, need run it for the rest of the frames + return action_history[-1]["type"], action_history[-1]["obj_ids"] + + # parse actions since last propagation + obj_ids = [] + for action in action_history[::-1]: + if "propagation" in action["type"]: + # we reached the last propagation action, stop parsing + break + if action["type"] in ["add", "refine"]: + obj_ids.extend(action["obj_ids"]) + # else action["type"] == "remove": noop + obj_ids = list(set(obj_ids)) if len(obj_ids) > 0 else None + propagation_type = ( + "propagation_partial" if obj_ids is not None else "propagation_fetch" + ) + return propagation_type, obj_ids + + def remove_object(self, inference_state, obj_id, is_user_action=False): + """ + We try to remove object from tracker states on every GPU, it will do nothing + for states without this object. + """ + obj_rank = self._get_gpu_id_by_obj_id(inference_state, obj_id) + assert obj_rank is not None, f"Object {obj_id} not found in any GPU." + + tracker_states_local = inference_state["tracker_inference_states"] + if self.rank == obj_rank: + self._tracker_remove_object(tracker_states_local, obj_id) + + if is_user_action: + self.add_action_history( + inference_state, action_type="remove", obj_ids=[obj_id] + ) + + # update metadata + tracker_metadata = inference_state["tracker_metadata"] + _obj_ids = tracker_metadata["obj_ids_per_gpu"][obj_rank] + tracker_metadata["obj_ids_per_gpu"][obj_rank] = _obj_ids[_obj_ids != obj_id] + tracker_metadata["num_obj_per_gpu"][obj_rank] = len( + tracker_metadata["obj_ids_per_gpu"][obj_rank] + ) + tracker_metadata["obj_ids_all_gpu"] = np.concatenate( + tracker_metadata["obj_ids_per_gpu"] + ) + tracker_metadata["obj_id_to_score"].pop(obj_id, None) + # tracker_metadata["max_obj_id"] # we do not reuse the object id, so we do not update it here + + # Clean up cached frame outputs to remove references to the deleted object + if "cached_frame_outputs" in inference_state: + for frame_idx in inference_state["cached_frame_outputs"]: + frame_cache = inference_state["cached_frame_outputs"][frame_idx] + if obj_id in frame_cache: + del frame_cache[obj_id] + + def _get_gpu_id_by_obj_id(self, inference_state, obj_id): + """ + Locate GPU ID for a given object. 
+ """ + obj_ids_per_gpu = inference_state["tracker_metadata"]["obj_ids_per_gpu"] + for rank, obj_ids in enumerate(obj_ids_per_gpu): + if obj_id in obj_ids: + return rank + return None # object not found in any GPU + + def _get_tracker_inference_states_by_obj_ids(self, inference_state, obj_ids): + """ + Get the Tracker inference states that contain the given object ids. + This is used to run partial Tracker propagation on a single object/bucket. + Possibly multiple or zero states can be returned. + """ + states = [ + state + for state in inference_state["tracker_inference_states"] + if set(obj_ids) & set(state["obj_ids"]) + ] + return states + + def _prepare_backbone_feats(self, inference_state, frame_idx, reverse): + input_batch = inference_state["input_batch"] + feature_cache = inference_state["feature_cache"] + num_frames = inference_state["num_frames"] + geometric_prompt = ( + inference_state["constants"]["empty_geometric_prompt"] + if inference_state["per_frame_geometric_prompt"][frame_idx] is None + else inference_state["per_frame_geometric_prompt"][frame_idx] + ) + _ = self.run_backbone_and_detection( + frame_idx=frame_idx, + num_frames=num_frames, + input_batch=input_batch, + geometric_prompt=geometric_prompt, + feature_cache=feature_cache, + reverse=reverse, + allow_new_detections=True, + ) + + @torch.inference_mode() + def add_prompt( + self, + inference_state, + frame_idx, + text_str=None, + boxes_xywh=None, + box_labels=None, + points=None, + point_labels=None, + obj_id=None, + rel_coordinates=True, + ): + if points is not None: + # Tracker instance prompts + assert ( + text_str is None and boxes_xywh is None + ), "When points are provided, text_str and boxes_xywh must be None." + assert ( + obj_id is not None + ), "When points are provided, obj_id must be provided." + return self.add_tracker_new_points( + inference_state, + frame_idx, + obj_id=obj_id, + points=points, + labels=point_labels, + rel_coordinates=rel_coordinates, + use_prev_mem_frame=self.use_prev_mem_frame, + ) + else: + # SAM3 prompts + return super().add_prompt( + inference_state, + frame_idx, + text_str=text_str, + boxes_xywh=boxes_xywh, + box_labels=box_labels, + ) + + @torch.inference_mode() + def add_tracker_new_points( + self, + inference_state, + frame_idx, + obj_id, + points, + labels, + rel_coordinates=True, + use_prev_mem_frame=False, + ): + """Add a new point prompt to Tracker. Suppporting instance refinement to existing + objects by passing existing obj_id or adding a new object by passing a new obj_id. + use_prev_mem_frame=False to disable cross attention to previous memory frames. + Every GPU returns the same results, and results should contain all masks including + these masks not refined or not added by the current user points. + """ + assert obj_id is not None, "obj_id must be provided to add new points" + tracker_metadata = inference_state["tracker_metadata"] + if tracker_metadata == {}: + # initialize masklet metadata if it's uninitialized (empty dict) + tracker_metadata.update(self._initialize_metadata()) + + obj_rank = self._get_gpu_id_by_obj_id(inference_state, obj_id) + + # prepare feature + self._prepare_backbone_feats(inference_state, frame_idx, reverse=False) + + object_has_been_refined = self._has_object_been_refined(inference_state, obj_id) + if ( + obj_rank is not None + and self.use_stateless_refinement + and not object_has_been_refined + ): + # The first time we start refinement on the object, we remove it. 
+ logger.debug( + f"[rank={self.rank}] Removing object {obj_id} before refinement." + ) + self.remove_object(inference_state, obj_id, is_user_action=False) + obj_rank = None + + if obj_rank is None: + # new object, we assign it a GPU and create a new inference state if limit allows + num_prev_obj = np.sum(tracker_metadata["num_obj_per_gpu"]) + if num_prev_obj >= self.max_num_objects: + logger.warning( + f"add_tracker_new_points: cannot add a new object as we are already tracking {num_prev_obj=} " + f"masklets (under {self.max_num_objects=})" + ) + obj_ids = [] + H_low_res = W_low_res = self.tracker.low_res_mask_size + H_video_res = inference_state["orig_height"] + W_video_res = inference_state["orig_width"] + low_res_masks = torch.zeros(0, 1, H_low_res, W_low_res) + video_res_masks = torch.zeros(0, 1, H_video_res, W_video_res) + return frame_idx, obj_ids, low_res_masks, video_res_masks + + new_det_gpu_ids = self._assign_new_det_to_gpus( + new_det_num=1, + prev_workload_per_gpu=tracker_metadata["num_obj_per_gpu"], + ) + obj_rank = new_det_gpu_ids[0] + + # get tracker inference state for the new object + if self.rank == obj_rank: + # for batched inference, we create a new inference state + tracker_state = self._init_new_tracker_state(inference_state) + inference_state["tracker_inference_states"].append(tracker_state) + + # update metadata + tracker_metadata["obj_ids_per_gpu"][obj_rank] = np.concatenate( + [ + tracker_metadata["obj_ids_per_gpu"][obj_rank], + np.array([obj_id], dtype=np.int64), + ] + ) + tracker_metadata["num_obj_per_gpu"][obj_rank] = len( + tracker_metadata["obj_ids_per_gpu"][obj_rank] + ) + tracker_metadata["obj_ids_all_gpu"] = np.concatenate( + tracker_metadata["obj_ids_per_gpu"] + ) + tracker_metadata["max_obj_id"] = max(tracker_metadata["max_obj_id"], obj_id) + + logger.debug( + f"[rank={self.rank}] Adding new object with id {obj_id} at frame {frame_idx}." + ) + self.add_action_history( + inference_state, "add", frame_idx=frame_idx, obj_ids=[obj_id] + ) + else: + # existing object, for refinement + if self.rank == obj_rank: + tracker_states = self._get_tracker_inference_states_by_obj_ids( + inference_state, [obj_id] + ) + assert ( + len(tracker_states) == 1 + ), f"[rank={self.rank}] Multiple Tracker inference states found for the same object id." + tracker_state = tracker_states[0] + + # log + logger.debug( + f"[rank={self.rank}] Refining existing object with id {obj_id} at frame {frame_idx}." 
+ ) + self.add_action_history( + inference_state, "refine", frame_idx=frame_idx, obj_ids=[obj_id] + ) + + # assign higher score to added/refined object + tracker_metadata["obj_id_to_score"][obj_id] = 1.0 + tracker_metadata["obj_id_to_tracker_score_frame_wise"][frame_idx][obj_id] = 1.0 + + if self.rank == 0: + rank0_metadata = tracker_metadata.get("rank0_metadata", {}) + + if "removed_obj_ids" in rank0_metadata: + rank0_metadata["removed_obj_ids"].discard(obj_id) + + if "suppressed_obj_ids" in rank0_metadata: + for frame_id in rank0_metadata["suppressed_obj_ids"]: + rank0_metadata["suppressed_obj_ids"][frame_id].discard(obj_id) + + if "masklet_confirmation" in rank0_metadata: + obj_ids_all_gpu = tracker_metadata["obj_ids_all_gpu"] + obj_indices = np.where(obj_ids_all_gpu == obj_id)[0] + if len(obj_indices) > 0: + obj_idx = obj_indices[0] + if obj_idx < len(rank0_metadata["masklet_confirmation"]["status"]): + rank0_metadata["masklet_confirmation"]["status"][obj_idx] = 1 + rank0_metadata["masklet_confirmation"]["consecutive_det_num"][ + obj_idx + ] = self.masklet_confirmation_consecutive_det_thresh + + if self.rank == obj_rank: + frame_idx, obj_ids, low_res_masks, video_res_masks = ( + self.tracker.add_new_points( + inference_state=tracker_state, + frame_idx=frame_idx, + obj_id=obj_id, + points=points, + labels=labels, + clear_old_points=True, + rel_coordinates=rel_coordinates, + use_prev_mem_frame=use_prev_mem_frame, + ) + ) + + if video_res_masks is not None and len(video_res_masks) > 0: + video_res_masks = fill_holes_in_mask_scores( + video_res_masks, # shape (N, 1, H_video, W_video) + max_area=self.fill_hole_area, + fill_holes=True, + remove_sprinkles=True, + ) + + # Since the mem encoder has already run for the current input points? + self.tracker.propagate_in_video_preflight( + tracker_state, run_mem_encoder=True + ) + # Clear detector conditioning frames when user clicks are received to allow + # model updating masks on these frames. It is a noop if user is refining on the + # detector conditioning frames or adding new objects. 
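+            # Only mask-only conditioning frames within
+            # `refinement_detector_cond_frame_removal_window` frames of the clicked frame
+            # are cleared (see clear_detector_added_cond_frame_in_tracker below).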
+ self.clear_detector_added_cond_frame_in_tracker( + tracker_state, obj_id, frame_idx + ) + + # fetch results from states and gather across GPUs + # Use optimized caching approach to avoid reprocessing unmodified objects + if self.rank == obj_rank and len(obj_ids) > 0: + new_mask_data = (video_res_masks[obj_ids.index(obj_id)] > 0.0).to( + torch.bool + ) + else: + new_mask_data = None + # Broadcast the new mask data across all ranks for consistency + if self.world_size > 1: + data_list = [new_mask_data.cpu() if new_mask_data is not None else None] + self.broadcast_python_obj_cpu(data_list, src=obj_rank) + new_mask_data = data_list[0].to(self.device) + + if self.rank == 0: + obj_id_to_mask = self._build_tracker_output( + inference_state, + frame_idx, + {obj_id: new_mask_data} if new_mask_data is not None else None, + ) + # post processing - remove suppressed obj_ids + obj_id_to_score = tracker_metadata["obj_id_to_score"] + suppressed_obj_ids = tracker_metadata["rank0_metadata"][ + "suppressed_obj_ids" + ][frame_idx] + obj_id_to_tracker_score = tracker_metadata[ + "obj_id_to_tracker_score_frame_wise" + ][frame_idx] + + out = { + "obj_id_to_mask": obj_id_to_mask, + "obj_id_to_score": obj_id_to_score, + "obj_id_to_tracker_score": obj_id_to_tracker_score, + } + self._cache_frame_outputs( + inference_state, + frame_idx, + obj_id_to_mask, + suppressed_obj_ids=suppressed_obj_ids, + ) + return frame_idx, self._postprocess_output( + inference_state, out, suppressed_obj_ids=suppressed_obj_ids + ) + else: + return frame_idx, None # no output on other GPUs + + def _gather_obj_id_to_mask_across_gpus(self, inference_state, obj_id_to_mask_local): + """Gather obj_id_to_mask from all GPUs. Optionally resize the masks to the video resolution.""" + tracker_metadata = inference_state["tracker_metadata"] + + # concatenate the output masklets from all local inference states + H_mask = W_mask = self.tracker.low_res_mask_size + obj_ids_local = tracker_metadata["obj_ids_per_gpu"][self.rank] + low_res_masks_local = [] + for obj_id in obj_ids_local: + if obj_id in obj_id_to_mask_local: + low_res_masks_local.append(obj_id_to_mask_local[obj_id]) + else: + low_res_masks_local.append( + torch.full((H_mask, W_mask), -1024.0, device=self.device) + ) + if len(low_res_masks_local) > 0: + low_res_masks_local = torch.stack(low_res_masks_local, dim=0) # (N, H, W) + assert low_res_masks_local.shape[1:] == (H_mask, W_mask) + else: + low_res_masks_local = torch.zeros(0, H_mask, W_mask, device=self.device) + + # all-gather `low_res_masks_local` into `low_res_masks_global` + # - low_res_masks_global: Tensor -- (num_global_obj, H_mask, W_mask) + if self.world_size > 1: + low_res_masks_local = low_res_masks_local.float().contiguous() + low_res_masks_peers = [ + low_res_masks_local.new_empty(num_obj, H_mask, W_mask) + for num_obj in tracker_metadata["num_obj_per_gpu"] + ] + dist.all_gather(low_res_masks_peers, low_res_masks_local) + low_res_masks_global = torch.cat(low_res_masks_peers, dim=0) + else: + low_res_masks_global = low_res_masks_local + return low_res_masks_global + + def _convert_low_res_mask_to_video_res(self, low_res_mask, inference_state): + """ + Convert a low-res mask to video resolution, matching the format expected by _build_tracker_output. 
+
+        Args:
+            low_res_mask: Tensor of shape (H_low_res, W_low_res)
+            inference_state: Contains video dimensions
+
+        Returns:
+            video_res_mask: Tensor of shape (1, H_video, W_video) bool
+        """
+        if low_res_mask is None:
+            return None
+
+        # Add batch and channel dims for interpolation:
+        # (H_low_res, W_low_res) -> (1, 1, H_low_res, W_low_res)
+        low_res_mask_3d = low_res_mask.unsqueeze(0).unsqueeze(0)
+
+        # Get video dimensions
+        H_video = inference_state["orig_height"]
+        W_video = inference_state["orig_width"]
+
+        video_res_mask = F.interpolate(
+            low_res_mask_3d.float(),
+            size=(H_video, W_video),
+            mode="bilinear",
+            align_corners=False,
+        )  # (1, 1, H_video, W_video)
+
+        # Drop the batch dim and convert to boolean: (1, H_video, W_video)
+        return (video_res_mask.squeeze(0) > 0.0).to(torch.bool)
+
+    def clear_detector_added_cond_frame_in_tracker(
+        self, tracker_state, obj_id, refined_frame_idx
+    ):
+        """Clear detector-added conditioning frames if they are within a predefined window
+        of the refined frame. This allows the model to update masks on these frames."""
+        obj_idx = self.tracker._obj_id_to_idx(tracker_state, obj_id)
+
+        mask_only_cond_frame_indices = []
+        window = self.refinement_detector_cond_frame_removal_window
+        for frame_idx in tracker_state["mask_inputs_per_obj"][obj_idx]:
+            if frame_idx not in tracker_state["point_inputs_per_obj"][obj_idx]:
+                # clear conditioning frames within a window of the refined frame
+                if abs(frame_idx - refined_frame_idx) <= window:
+                    mask_only_cond_frame_indices.append(frame_idx)
+
+        # clear
+        if len(mask_only_cond_frame_indices) > 0:
+            for frame_idx in mask_only_cond_frame_indices:
+                # obj_ids_on_this_frame is essentially all obj_ids in the state
+                # since they are bucket batched
+                obj_ids_on_this_frame = tracker_state["obj_id_to_idx"].keys()
+                for obj_id2 in obj_ids_on_this_frame:
+                    self.tracker.clear_all_points_in_frame(
+                        tracker_state, frame_idx, obj_id2, need_output=False
+                    )
+            logger.debug(
+                f"Cleared detector mask only conditioning frames ({mask_only_cond_frame_indices}) in Tracker."
+            )
+        return
+
+
+def is_image_type(resource_path: str) -> bool:
+    if isinstance(resource_path, list):
+        return len(resource_path) == 1
+    return resource_path.lower().endswith(tuple(IMAGE_EXTS))
diff --git a/detect_tools/sam3/sam3/model/sam3_video_predictor.py b/detect_tools/sam3/sam3/model/sam3_video_predictor.py
new file mode 100644
index 0000000000000000000000000000000000000000..c639e1d058eb7389aef914e0b7de27c86b693230
--- /dev/null
+++ b/detect_tools/sam3/sam3/model/sam3_video_predictor.py
@@ -0,0 +1,521 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
All Rights Reserved + +import datetime +import gc +import multiprocessing as mp +import os +import queue +import socket +import sys +import time +import uuid +from contextlib import closing +from typing import List, Optional + +import psutil +import torch + +from sam3.logger import get_logger + +logger = get_logger(__name__) + + +class Sam3VideoPredictor: + # a global dictionary that holds all inference states for this model (key is session_id) + _ALL_INFERENCE_STATES = {} + + def __init__( + self, + checkpoint_path=None, + bpe_path=None, + has_presence_token=True, + geo_encoder_use_img_cross_attn=True, + strict_state_dict_loading=True, + async_loading_frames=False, + video_loader_type="cv2", + apply_temporal_disambiguation: bool = True, + ): + self.async_loading_frames = async_loading_frames + self.video_loader_type = video_loader_type + from sam3.model_builder import build_sam3_video_model + + self.model = ( + build_sam3_video_model( + checkpoint_path=checkpoint_path, + bpe_path=bpe_path, + has_presence_token=has_presence_token, + geo_encoder_use_img_cross_attn=geo_encoder_use_img_cross_attn, + strict_state_dict_loading=strict_state_dict_loading, + apply_temporal_disambiguation=apply_temporal_disambiguation, + ) + .cuda() + .eval() + ) + + @torch.inference_mode() + def handle_request(self, request): + """Dispatch a request based on its type.""" + request_type = request["type"] + if request_type == "start_session": + return self.start_session( + resource_path=request["resource_path"], + session_id=request.get("session_id", None), + ) + elif request_type == "add_prompt": + return self.add_prompt( + session_id=request["session_id"], + frame_idx=request["frame_index"], + text=request.get("text", None), + points=request.get("points", None), + point_labels=request.get("point_labels", None), + bounding_boxes=request.get("bounding_boxes", None), + bounding_box_labels=request.get("bounding_box_labels", None), + obj_id=request.get("obj_id", None), + ) + elif request_type == "remove_object": + return self.remove_object( + session_id=request["session_id"], + obj_id=request["obj_id"], + is_user_action=request.get("is_user_action", True), + ) + elif request_type == "reset_session": + return self.reset_session(session_id=request["session_id"]) + elif request_type == "close_session": + return self.close_session(session_id=request["session_id"]) + else: + raise RuntimeError(f"invalid request type: {request_type}") + + @torch.inference_mode() + def handle_stream_request(self, request): + """Dispatch a stream request based on its type.""" + request_type = request["type"] + if request_type == "propagate_in_video": + yield from self.propagate_in_video( + session_id=request["session_id"], + propagation_direction=request.get("propagation_direction", "both"), + start_frame_idx=request.get("start_frame_index", None), + max_frame_num_to_track=request.get("max_frame_num_to_track", None), + ) + else: + raise RuntimeError(f"invalid request type: {request_type}") + + def start_session(self, resource_path, session_id=None): + """ + Start a new inference session on an image or a video. Here `resource_path` + can be either a path to an image file (for image inference) or an MP4 file + or directory with JPEG video frames (for video inference). + + If `session_id` is defined, it will be used as identifier for the + session. If it is not defined, the start_session function will create + a session id and return it. 
+ """ + # get an initial inference_state from the model + inference_state = self.model.init_state( + resource_path=resource_path, + async_loading_frames=self.async_loading_frames, + video_loader_type=self.video_loader_type, + ) + if not session_id: + session_id = str(uuid.uuid4()) + self._ALL_INFERENCE_STATES[session_id] = { + "state": inference_state, + "session_id": session_id, + "start_time": time.time(), + } + logger.debug( + f"started new session {session_id}; {self._get_session_stats()}; " + f"{self._get_torch_and_gpu_properties()}" + ) + return {"session_id": session_id} + + def add_prompt( + self, + session_id: str, + frame_idx: int, + text: Optional[str] = None, + points: Optional[List[List[float]]] = None, + point_labels: Optional[List[int]] = None, + bounding_boxes: Optional[List[List[float]]] = None, + bounding_box_labels: Optional[List[int]] = None, + obj_id: Optional[int] = None, + ): + """Add text, box and/or point prompt on a specific video frame.""" + logger.debug( + f"add prompt on frame {frame_idx} in session {session_id}: " + f"{text=}, {points=}, {point_labels=}, " + f"{bounding_boxes=}, {bounding_box_labels=}" + ) + session = self._get_session(session_id) + inference_state = session["state"] + + frame_idx, outputs = self.model.add_prompt( + inference_state=inference_state, + frame_idx=frame_idx, + text_str=text, + points=points, + point_labels=point_labels, + boxes_xywh=bounding_boxes, + box_labels=bounding_box_labels, + obj_id=obj_id, + ) + return {"frame_index": frame_idx, "outputs": outputs} + + def remove_object( + self, + session_id: str, + obj_id: int, + is_user_action: bool = True, + ): + """Remove an object from tracking.""" + logger.debug( + f"remove object {obj_id} in session {session_id}: " f"{is_user_action=}" + ) + session = self._get_session(session_id) + inference_state = session["state"] + + self.model.remove_object( + inference_state=inference_state, + obj_id=obj_id, + is_user_action=is_user_action, + ) + return {"is_success": True} + + def propagate_in_video( + self, + session_id, + propagation_direction, + start_frame_idx, + max_frame_num_to_track, + ): + """Propagate the added prompts to get grounding results on all video frames.""" + logger.debug( + f"propagate in video in session {session_id}: " + f"{propagation_direction=}, {start_frame_idx=}, {max_frame_num_to_track=}" + ) + try: + session = self._get_session(session_id) + inference_state = session["state"] + if propagation_direction not in ["both", "forward", "backward"]: + raise ValueError( + f"invalid propagation direction: {propagation_direction}" + ) + + # First doing the forward propagation + if propagation_direction in ["both", "forward"]: + for frame_idx, outputs in self.model.propagate_in_video( + inference_state=inference_state, + start_frame_idx=start_frame_idx, + max_frame_num_to_track=max_frame_num_to_track, + reverse=False, + ): + yield {"frame_index": frame_idx, "outputs": outputs} + # Then doing the backward propagation (reverse in time) + if propagation_direction in ["both", "backward"]: + for frame_idx, outputs in self.model.propagate_in_video( + inference_state=inference_state, + start_frame_idx=start_frame_idx, + max_frame_num_to_track=max_frame_num_to_track, + reverse=True, + ): + yield {"frame_index": frame_idx, "outputs": outputs} + finally: + # Log upon completion (so that e.g. we can see if two propagations happen in parallel). + # Using `finally` here to log even when the tracking is aborted with GeneratorExit. 
+ logger.debug( + f"propagation ended in session {session_id}; {self._get_session_stats()}" + ) + + def reset_session(self, session_id): + """Reset the session to its initial state (as when it's initial opened).""" + logger.debug(f"reset session {session_id}") + session = self._get_session(session_id) + inference_state = session["state"] + self.model.reset_state(inference_state) + return {"is_success": True} + + def close_session(self, session_id): + """ + Close a session. This method is idempotent and can be called multiple + times on the same "session_id". + """ + session = self._ALL_INFERENCE_STATES.pop(session_id, None) + if session is None: + logger.warning( + f"cannot close session {session_id} as it does not exist (it might have expired); " + f"{self._get_session_stats()}" + ) + else: + del session + gc.collect() + logger.info(f"removed session {session_id}; {self._get_session_stats()}") + return {"is_success": True} + + def _get_session(self, session_id): + session = self._ALL_INFERENCE_STATES.get(session_id, None) + if session is None: + raise RuntimeError( + f"Cannot find session {session_id}; it might have expired" + ) + return session + + def _get_session_stats(self): + """Get a statistics string for live sessions and their GPU usage.""" + # print both the session ids and their video frame numbers + live_session_strs = [ + f"'{session_id}' ({session['state']['num_frames']} frames)" + for session_id, session in self._ALL_INFERENCE_STATES.items() + ] + session_stats_str = ( + f"live sessions: [{', '.join(live_session_strs)}], GPU memory: " + f"{torch.cuda.memory_allocated() // 1024**2} MiB used and " + f"{torch.cuda.memory_reserved() // 1024**2} MiB reserved" + f" (max over time: {torch.cuda.max_memory_allocated() // 1024**2} MiB used " + f"and {torch.cuda.max_memory_reserved() // 1024**2} MiB reserved)" + ) + return session_stats_str + + def _get_torch_and_gpu_properties(self): + """Get a string for PyTorch and GPU properties (for logging and debugging).""" + torch_and_gpu_str = ( + f"torch: {torch.__version__} with CUDA arch {torch.cuda.get_arch_list()}, " + f"GPU device: {torch.cuda.get_device_properties(torch.cuda.current_device())}" + ) + return torch_and_gpu_str + + def shutdown(self): + """Shutdown the predictor and clear all sessions.""" + self._ALL_INFERENCE_STATES.clear() + + +class Sam3VideoPredictorMultiGPU(Sam3VideoPredictor): + def __init__(self, *model_args, gpus_to_use=None, **model_kwargs): + if gpus_to_use is None: + # if not specified, use only the current GPU by default + gpus_to_use = [torch.cuda.current_device()] + + IS_MAIN_PROCESS = os.getenv("IS_MAIN_PROCESS", "1") == "1" + if IS_MAIN_PROCESS: + gpus_to_use = sorted(set(gpus_to_use)) + logger.info(f"using the following GPU IDs: {gpus_to_use}") + assert len(gpus_to_use) > 0 and all(isinstance(i, int) for i in gpus_to_use) + assert all(0 <= i < torch.cuda.device_count() for i in gpus_to_use) + os.environ["MASTER_ADDR"] = "localhost" + os.environ["MASTER_PORT"] = f"{self._find_free_port()}" + os.environ["RANK"] = "0" + os.environ["WORLD_SIZE"] = f"{len(gpus_to_use)}" + + self.gpus_to_use = gpus_to_use + self.rank = int(os.environ["RANK"]) + self.world_size = int(os.environ["WORLD_SIZE"]) + self.rank_str = f"rank={self.rank} with world_size={self.world_size}" + self.device = torch.device(f"cuda:{self.gpus_to_use[self.rank]}") + torch.cuda.set_device(self.device) + self.has_shutdown = False + if self.rank == 0: + logger.info("\n\n\n\t*** START loading model on all ranks ***\n\n") + + logger.info(f"loading 
model on {self.rank_str} -- this could take a while ...") + super().__init__(*model_args, **model_kwargs) + logger.info(f"loading model on {self.rank_str} -- DONE locally") + + if self.world_size > 1 and self.rank == 0: + # start the worker processes *after* the model is loaded in the main process + # so that the main process can run torch.compile and fill the cache first + self._start_worker_processes(*model_args, **model_kwargs) + for rank in range(1, self.world_size): + self.command_queues[rank].put(("start_nccl_process_group", None)) + self._start_nccl_process_group() + + if self.rank == 0: + logger.info("\n\n\n\t*** DONE loading model on all ranks ***\n\n") + + @torch.inference_mode() + def handle_request(self, request): + """Dispatch a request based on its type.""" + if self.has_shutdown: + raise RuntimeError( + "cannot handle request after the predictor has shutdown; please create a new predictor" + ) + + # when starting a session, we need to create a session id before dispatching + # the request to the workers + if request["type"] == "start_session" and request.get("session_id") is None: + request["session_id"] = str(uuid.uuid4()) + # dispatch the request to all worker processes + if self.world_size > 1 and self.rank == 0: + for rank in range(1, self.world_size): + self.command_queues[rank].put((request, False)) + + response = super().handle_request(request) + + if self.world_size > 1: + torch.distributed.barrier() # wait for all ranks to finish + return response + + @torch.inference_mode() + def handle_stream_request(self, request): + """Dispatch a stream request based on its type.""" + if self.has_shutdown: + raise RuntimeError( + "cannot handle request after the predictor has shutdown; please create a new predictor" + ) + + # dispatch the request to all worker processes + if self.world_size > 1 and self.rank == 0: + for rank in range(1, self.world_size): + self.command_queues[rank].put((request, True)) + + yield from super().handle_stream_request(request) + + if self.world_size > 1: + torch.distributed.barrier() # wait for all ranks to finish + + def _start_worker_processes(self, *model_args, **model_kwargs): + """Start worker processes for handling model inference.""" + world_size = self.world_size + logger.info(f"spawning {world_size - 1} worker processes") + # Use "spawn" (instead of "fork") for different PyTorch or CUDA context + mp_ctx = mp.get_context("spawn") + self.command_queues = {rank: mp_ctx.Queue() for rank in range(1, world_size)} + self.result_queues = {rank: mp_ctx.Queue() for rank in range(1, world_size)} + parent_pid = os.getpid() + for rank in range(1, world_size): + # set the environment variables for each worker process + os.environ["IS_MAIN_PROCESS"] = "0" # mark this as a worker process + os.environ["RANK"] = f"{rank}" + worker_process = mp_ctx.Process( + target=Sam3VideoPredictorMultiGPU._worker_process_command_loop, + args=( + rank, + world_size, + self.command_queues[rank], + self.result_queues[rank], + model_args, + model_kwargs, + self.gpus_to_use, + parent_pid, + ), + daemon=True, + ) + worker_process.start() + # revert the environment variables for the main process + os.environ["IS_MAIN_PROCESS"] = "1" + os.environ["RANK"] = "0" + # wait for all the worker processes to load the model and collect their PIDs + self.worker_pids = {} + for rank in range(1, self.world_size): + # a large timeout to cover potentially long model loading time due to compilation + _, worker_pid = self.result_queues[rank].get(timeout=7200) + self.worker_pids[rank] = worker_pid 
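+        # At this point every worker has loaded its model replica; the NCCL process
+        # group is initialized afterwards from __init__ via a "start_nccl_process_group"
+        # command on each worker's command queue.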
+ logger.info(f"spawned {world_size - 1} worker processes") + + def _start_nccl_process_group(self): + rank = int(os.environ["RANK"]) + world_size = int(os.environ["WORLD_SIZE"]) + if world_size == 1: + return + + logger.debug(f"starting NCCL process group on {rank=} with {world_size=}") + assert not torch.distributed.is_initialized() + # use the "env://" init method with environment variables set in start_worker_processes + # a short 3-min timeout to quickly detect any synchronization failures + timeout_sec = int(os.getenv("SAM3_COLLECTIVE_OP_TIMEOUT_SEC", "180")) + timeout = datetime.timedelta(seconds=timeout_sec) + torch.distributed.init_process_group( + backend="nccl", + init_method="env://", + timeout=timeout, + device_id=self.device, + ) + # warm-up the NCCL process group by running a dummy all-reduce + tensor = torch.ones(1024, 1024).cuda() + torch.distributed.all_reduce(tensor) + logger.debug(f"started NCCL process group on {rank=} with {world_size=}") + + def _find_free_port(self) -> int: + """ + Find a free port (a random free port from 1024 to 65535 will be selected) + https://stackoverflow.com/questions/1365265/on-localhost-how-do-i-pick-a-free-port-number) + """ + with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: + s.bind(("", 0)) + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + return s.getsockname()[1] + + @staticmethod + def _worker_process_command_loop( + rank, + world_size, + command_queue, + result_queue, + model_args, + model_kwargs, + gpus_to_use, + parent_pid, + ): + """ + The command loop for each worker process. It listens to commands from the main process + and executes them using the model. + """ + logger.info(f"starting worker process {rank=} with {world_size=}") + # verify that the environment variables are set correctly + assert int(os.environ["IS_MAIN_PROCESS"]) == 0 + assert int(os.environ["RANK"]) == rank + assert int(os.environ["WORLD_SIZE"]) == world_size + # load the model in this worker process + predictor = Sam3VideoPredictorMultiGPU( + *model_args, gpus_to_use=gpus_to_use, **model_kwargs + ) + logger.info(f"started worker {rank=} with {world_size=}") + # return the worker process id to the main process for bookkeeping + worker_pid = os.getpid() + result_queue.put(("load_model", worker_pid)) + + # wait for the command to start the NCCL process group + request_type, _ = command_queue.get(timeout=7200) + assert request_type == "start_nccl_process_group" + predictor._start_nccl_process_group() + + # keep listening to commands from the main process + while True: + try: + request, is_stream_request = command_queue.get(timeout=5.0) + if request == "shutdown": + logger.info(f"worker {rank=} shutting down") + torch.distributed.destroy_process_group() + result_queue.put(("shutdown", True)) # acknowledge the shutdown + sys.exit(0) + + logger.debug(f"worker {rank=} received request {request['type']=}") + if is_stream_request: + for _ in predictor.handle_stream_request(request): + pass # handle stream requests in a generator fashion + else: + predictor.handle_request(request) + except queue.Empty: + # Usually Python's multiprocessing module will shutdown all the daemon worker + # processes when the main process exits gracefully. However, the user may kill + # the main process using SIGKILL and thereby leaving no chance for the main process + # to clean up its daemon child processes. So here we manually check whether the + # parent process still exists (every 5 sec as in `command_queue.get` timeout). 
+ if not psutil.pid_exists(parent_pid): + logger.info( + f"stopping worker {rank=} as its parent process has exited" + ) + sys.exit(1) + except Exception as e: + logger.error(f"worker {rank=} exception: {e}", exc_info=True) + + def shutdown(self): + """Shutdown all worker processes.""" + if self.rank == 0 and self.world_size > 1: + logger.info(f"shutting down {self.world_size - 1} worker processes") + for rank in range(1, self.world_size): + self.command_queues[rank].put(("shutdown", False)) + torch.distributed.destroy_process_group() + for rank in range(1, self.world_size): + self.result_queues[rank].get() # wait for the worker to acknowledge + logger.info(f"shut down {self.world_size - 1} worker processes") + self.has_shutdown = True + + super().shutdown() diff --git a/detect_tools/sam3/sam3/model/text_encoder_ve.py b/detect_tools/sam3/sam3/model/text_encoder_ve.py new file mode 100644 index 0000000000000000000000000000000000000000..b1cf145ca5eae383e0ecce8d7358570125eaadb3 --- /dev/null +++ b/detect_tools/sam3/sam3/model/text_encoder_ve.py @@ -0,0 +1,328 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +from collections import OrderedDict +from typing import Callable, List, Optional, Tuple, Union + +import torch +import torch.nn as nn +from torch.utils.checkpoint import checkpoint + +from .model_misc import LayerScale + + +class ResidualAttentionBlock(nn.Module): + def __init__( + self, + d_model: int, + n_head: int, + mlp_ratio: float = 4.0, + ls_init_value: Optional[float] = None, + act_layer: Callable[[], nn.Module] = nn.GELU, + norm_layer: Callable[[int], nn.Module] = nn.LayerNorm, + ): + super().__init__() + # Attention + self.attn = nn.MultiheadAttention(d_model, n_head, batch_first=True) + + # LayerNorm, LayerScale + self.ln_1 = norm_layer(d_model) + self.ln_2 = norm_layer(d_model) + + self.ls_1 = ( + LayerScale(d_model, ls_init_value) + if ls_init_value is not None + else nn.Identity() + ) + self.ls_2 = ( + LayerScale(d_model, ls_init_value) + if ls_init_value is not None + else nn.Identity() + ) + + # MLP + mlp_width = int(d_model * mlp_ratio) + self.mlp = nn.Sequential( + OrderedDict( + [ + ("c_fc", nn.Linear(d_model, mlp_width)), + ("gelu", act_layer()), + ("c_proj", nn.Linear(mlp_width, d_model)), + ] + ) + ) + + def attention( + self, + q_x: torch.Tensor, + k_x: Optional[torch.Tensor] = None, + v_x: Optional[torch.Tensor] = None, + attn_mask: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + k_x = k_x if k_x is not None else q_x + v_x = v_x if v_x is not None else q_x + if attn_mask is not None: + # Leave boolean masks as is + if not attn_mask.dtype == torch.bool: + attn_mask = attn_mask.to(q_x.dtype) + + return self.attn(q_x, k_x, v_x, need_weights=False, attn_mask=attn_mask)[0] + + def forward( + self, + q_x: torch.Tensor, + k_x: Optional[torch.Tensor] = None, + v_x: Optional[torch.Tensor] = None, + attn_mask: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + k_x = ( + self.ln_1_kv(k_x) if hasattr(self, "ln_1_kv") and k_x is not None else None + ) + v_x = ( + self.ln_1_kv(v_x) if hasattr(self, "ln_1_kv") and v_x is not None else None + ) + x = q_x + self.ls_1( + self.attention(q_x=self.ln_1(q_x), k_x=k_x, v_x=v_x, attn_mask=attn_mask) + ) + x = x + self.ls_2(self.mlp(self.ln_2(x))) + return x + + +class Transformer(nn.Module): + def __init__( + self, + width: int, + layers: int, + heads: int, + mlp_ratio: float = 4.0, + ls_init_value: Optional[float] = None, + act_layer: Callable[[], nn.Module] = nn.GELU, + norm_layer: Callable[[int], 
nn.Module] = nn.LayerNorm, + compile_mode: Optional[str] = None, + use_act_checkpoint: bool = False, + ): + super().__init__() + self.width = width + self.layers = layers + self.grad_checkpointing = use_act_checkpoint + self.resblocks = nn.ModuleList( + [ + ResidualAttentionBlock( + width, + heads, + mlp_ratio, + ls_init_value=ls_init_value, + act_layer=act_layer, + norm_layer=norm_layer, + ) + for _ in range(layers) + ] + ) + + if compile_mode is not None: + self.forward = torch.compile( + self.forward, mode=compile_mode, fullgraph=True + ) + if self.grad_checkpointing: + torch._dynamo.config.optimize_ddp = False + + def forward( + self, + x: torch.Tensor, + attn_mask: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + for _, r in enumerate(self.resblocks): + if ( + self.grad_checkpointing + and not torch.jit.is_scripting() + and self.training + ): + x = checkpoint(r, x, None, None, attn_mask, use_reentrant=False) + else: + x = r( + x, + attn_mask=attn_mask, + ) + return x + + +def text_global_pool( + x: torch.Tensor, text: Optional[torch.Tensor] = None, pool_type: str = "argmax" +) -> Tuple[torch.Tensor, torch.Tensor]: + if pool_type == "first": + pooled, tokens = x[:, 0], x[:, 1:] + elif pool_type == "last": + pooled, tokens = x[:, -1], x[:, :-1] + elif pool_type == "argmax": + # take features from the eot embedding (eot_token is the highest number in each sequence) + assert text is not None + pooled, tokens = x[torch.arange(x.shape[0]), text.argmax(dim=-1)], x + else: + pooled = tokens = x + return pooled, tokens + + +class TextTransformer(nn.Module): + def __init__( + self, + context_length: int = 77, + vocab_size: int = 49408, + width: int = 512, + heads: int = 8, + layers: int = 12, + mlp_ratio: float = 4.0, + ls_init_value: Optional[float] = None, + output_dim: int = 512, + no_causal_mask: bool = False, + pool_type: str = "none", # no pooling + proj_bias: bool = False, + act_layer: Callable = nn.GELU, + norm_layer: Callable = nn.LayerNorm, + output_tokens: bool = False, + use_ln_post: bool = True, + compile_mode: Optional[str] = None, + use_act_checkpoint: bool = False, + ): + super().__init__() + assert pool_type in ("first", "last", "argmax", "none") + self.output_tokens = output_tokens + self.num_pos = self.context_length = context_length + self.vocab_size = vocab_size + self.width = width + self.output_dim = output_dim + self.heads = heads + self.pool_type = pool_type + + self.token_embedding = nn.Embedding(self.vocab_size, width) + self.positional_embedding = nn.Parameter(torch.empty(self.num_pos, width)) + self.transformer = Transformer( + width=width, + layers=layers, + heads=heads, + mlp_ratio=mlp_ratio, + ls_init_value=ls_init_value, + act_layer=act_layer, + norm_layer=norm_layer, + compile_mode=compile_mode, + use_act_checkpoint=use_act_checkpoint, + ) + self.ln_final = norm_layer(width) if use_ln_post else nn.Identity() + if no_causal_mask: + self.attn_mask = None + else: + self.register_buffer( + "attn_mask", self.build_causal_mask(), persistent=False + ) + if proj_bias: + self.text_projection = nn.Linear(width, output_dim) + else: + self.text_projection = nn.Parameter(torch.empty(width, output_dim)) + + def build_causal_mask(self) -> torch.Tensor: + # lazily create causal attention mask, with full attention between the tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(self.num_pos, self.num_pos) + mask.fill_(float("-inf")) + mask.triu_(1) # zero out the lower diagonal + return mask + + def forward( + self, text: torch.Tensor + ) -> 
Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: + seq_len = text.shape[1] + x = self.token_embedding(text) # [batch_size, n_ctx, d_model] + + attn_mask = self.attn_mask + if attn_mask is not None: + attn_mask = attn_mask[:seq_len, :seq_len] + + x = x + self.positional_embedding[:seq_len] + x = self.transformer(x, attn_mask=attn_mask) + + x = self.ln_final(x) + pooled, tokens = text_global_pool(x, text, pool_type=self.pool_type) + if self.text_projection is not None: + if isinstance(self.text_projection, nn.Linear): + pooled = self.text_projection(pooled) + else: + pooled = pooled @ self.text_projection + if self.output_tokens: + return pooled, tokens + return pooled + + +class VETextEncoder(nn.Module): + def __init__( + self, + d_model: int, + tokenizer: Callable, + width: int = 1024, + heads: int = 16, + layers: int = 24, + context_length: int = 32, + vocab_size: int = 49408, + use_ln_post: bool = True, + compile_mode: Optional[str] = None, + use_act_checkpoint: bool = True, + ): + super().__init__() + self.context_length = context_length + self.use_ln_post = use_ln_post + self.tokenizer = tokenizer + + self.encoder = TextTransformer( + context_length=self.context_length, + vocab_size=vocab_size, + width=width, + heads=heads, + layers=layers, + # we want the tokens, not just the pooled output + output_tokens=True, + use_ln_post=use_ln_post, + compile_mode=compile_mode, + use_act_checkpoint=use_act_checkpoint, + ) + self.resizer = nn.Linear(self.encoder.width, d_model) + + def forward( + self, + text: Union[List[str], Tuple[torch.Tensor, torch.Tensor, dict]], + input_boxes: Optional[List] = None, + device: torch.device = None, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + if isinstance(text[0], str): + # no use case for this + assert input_boxes is None or len(input_boxes) == 0, "not supported" + + # Encode the text + tokenized = self.tokenizer(text, context_length=self.context_length).to( + device + ) # [b, seq_len] + text_attention_mask = (tokenized != 0).bool() + + # manually embed the tokens + inputs_embeds = self.encoder.token_embedding( + tokenized + ) # [b, seq_len, d=1024] + _, text_memory = self.encoder(tokenized) # [b, seq_len, d=1024] + + assert text_memory.shape[1] == inputs_embeds.shape[1] + # Invert attention mask because its the opposite in pytorch transformer + text_attention_mask = text_attention_mask.ne(1) + # Transpose memory because pytorch's attention expects sequence first + text_memory = text_memory.transpose(0, 1) + # Resize the encoder hidden states to be of the same d_model as the decoder + text_memory_resized = self.resizer(text_memory) + else: + # The text is already encoded, use as is. + text_attention_mask, text_memory_resized, tokenized = text + inputs_embeds = tokenized["inputs_embeds"] + assert ( + input_boxes is None or len(input_boxes) == 0 + ), "Can't replace boxes in text if it's already encoded" + + # Note that the input_embeds are returned in pytorch's convention (sequence first) + return ( + text_attention_mask, + text_memory_resized, + inputs_embeds.transpose(0, 1), + ) diff --git a/detect_tools/sam3/sam3/model/tokenizer_ve.py b/detect_tools/sam3/sam3/model/tokenizer_ve.py new file mode 100644 index 0000000000000000000000000000000000000000..ef42773db37e84ad8fe165e7f334dab617d47f35 --- /dev/null +++ b/detect_tools/sam3/sam3/model/tokenizer_ve.py @@ -0,0 +1,253 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +""" +Text Tokenizer. 
+ +Copied and lightly adapted from VE repo, which in turn copied +from open_clip and openAI CLIP. +""" + +import gzip +import html +import io +import os +import string +from functools import lru_cache +from typing import List, Optional, Union + +import ftfy +import regex as re +import torch +from iopath.common.file_io import g_pathmgr + + +# https://stackoverflow.com/q/62691279 +os.environ["TOKENIZERS_PARALLELISM"] = "false" +DEFAULT_CONTEXT_LENGTH = 77 + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a significant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. + """ + bs = ( + list(range(ord("!"), ord("~") + 1)) + + list(range(ord("¡"), ord("¬") + 1)) + + list(range(ord("®"), ord("ÿ") + 1)) + ) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """Return set of symbol pairs in a word. + Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +def basic_clean(text): + text = ftfy.fix_text(text) + text = html.unescape(html.unescape(text)) + return text.strip() + + +def whitespace_clean(text): + text = re.sub(r"\s+", " ", text) + text = text.strip() + return text + + +def _clean_canonicalize(x): + # basic, remove whitespace, remove punctuation, lower case + return canonicalize_text(basic_clean(x)) + + +def _clean_lower(x): + # basic, remove whitespace, lower case + return whitespace_clean(basic_clean(x)).lower() + + +def _clean_whitespace(x): + # basic, remove whitespace + return whitespace_clean(basic_clean(x)) + + +def get_clean_fn(type: str): + if type == "canonicalize": + return _clean_canonicalize + elif type == "lower": + return _clean_lower + elif type == "whitespace": + return _clean_whitespace + else: + assert False, f"Invalid clean function ({type})." + + +def canonicalize_text(text, *, keep_punctuation_exact_string=None): + """Returns canonicalized `text` (lowercase and punctuation removed). + From: https://github.com/google-research/big_vision/blob/53f18caf27a9419231bbf08d3388b07671616d3d/big_vision/evaluators/proj/image_text/prompt_engineering.py#L94 + Args: + text: string to be canonicalized. + keep_punctuation_exact_string: If provided, then this exact string kept. + For example providing '{}' will keep any occurrences of '{}' (but will + still remove '{' and '}' that appear separately). 
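+    Example: canonicalize_text("Hello, World!") -> "hello world".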
+    """
+    text = text.replace("_", " ")
+    if keep_punctuation_exact_string:
+        text = keep_punctuation_exact_string.join(
+            part.translate(str.maketrans("", "", string.punctuation))
+            for part in text.split(keep_punctuation_exact_string)
+        )
+    else:
+        text = text.translate(str.maketrans("", "", string.punctuation))
+    text = text.lower()
+    text = re.sub(r"\s+", " ", text)
+    return text.strip()
+
+
+class SimpleTokenizer(object):
+    def __init__(
+        self,
+        bpe_path: Union[str, os.PathLike],
+        additional_special_tokens: Optional[List[str]] = None,
+        context_length: Optional[int] = DEFAULT_CONTEXT_LENGTH,
+        clean: str = "lower",
+    ):
+        self.byte_encoder = bytes_to_unicode()
+        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+        with g_pathmgr.open(bpe_path, "rb") as fh:
+            bpe_bytes = io.BytesIO(fh.read())
+        merges = gzip.open(bpe_bytes).read().decode("utf-8").split("\n")
+        # merges = gzip.open(bpe_path).read().decode("utf-8").split("\n")
+        merges = merges[1 : 49152 - 256 - 2 + 1]
+        merges = [tuple(merge.split()) for merge in merges]
+        vocab = list(bytes_to_unicode().values())
+        vocab = vocab + [v + "</w>" for v in vocab]
+        for merge in merges:
+            vocab.append("".join(merge))
+        special_tokens = ["<start_of_text>", "<end_of_text>"]
+        if additional_special_tokens:
+            special_tokens += additional_special_tokens
+        vocab.extend(special_tokens)
+        self.encoder = dict(zip(vocab, range(len(vocab))))
+        self.decoder = {v: k for k, v in self.encoder.items()}
+        self.bpe_ranks = dict(zip(merges, range(len(merges))))
+        self.cache = {t: t for t in special_tokens}
+        special = "|".join(special_tokens)
+        self.pat = re.compile(
+            special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
+            re.IGNORECASE,
+        )
+        self.vocab_size = len(self.encoder)
+        self.all_special_ids = [self.encoder[t] for t in special_tokens]
+        self.sot_token_id = self.all_special_ids[0]
+        self.eot_token_id = self.all_special_ids[1]
+        self.context_length = context_length
+        self.clean_fn = get_clean_fn(clean)
+
+    def bpe(self, token):
+        if token in self.cache:
+            return self.cache[token]
+        word = tuple(token[:-1]) + (token[-1] + "</w>",)
+        pairs = get_pairs(word)
+        if not pairs:
+            return token + "</w>"
+        while True:
+            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+            if bigram not in self.bpe_ranks:
+                break
+            first, second = bigram
+            new_word = []
+            i = 0
+            while i < len(word):
+                try:
+                    j = word.index(first, i)
+                    new_word.extend(word[i:j])
+                    i = j
+                except ValueError:
+                    new_word.extend(word[i:])
+                    break
+                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+                    new_word.append(first + second)
+                    i += 2
+                else:
+                    new_word.append(word[i])
+                    i += 1
+            new_word = tuple(new_word)
+            word = new_word
+            if len(word) == 1:
+                break
+            else:
+                pairs = get_pairs(word)
+        word = " ".join(word)
+        self.cache[token] = word
+        return word
+
+    def encode(self, text):
+        bpe_tokens = []
+        text = self.clean_fn(text)
+        for token in re.findall(self.pat, text):
+            token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
+            bpe_tokens.extend(
+                self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ")
+            )
+        return bpe_tokens
+
+    def decode(self, tokens):
+        text = "".join([self.decoder[token] for token in tokens])
+        text = (
+            bytearray([self.byte_decoder[c] for c in text])
+            .decode("utf-8", errors="replace")
+            .replace("</w>", " ")
+        )
+        return text
+
+    def __call__(
+        self, texts: Union[str, List[str]], context_length: Optional[int] = None
+    ) -> torch.LongTensor:
+        """Returns the tokenized representation of given input string(s)
+        Parameters
+ ---------- + texts : Union[str, List[str]] + An input string or a list of input strings to tokenize + context_length : int + The context length to use; all CLIP models use 77 as the context length + Returns + ------- + A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length] + """ + if isinstance(texts, str): + texts = [texts] + context_length = context_length or self.context_length + assert context_length, "Please set a valid context length" + all_tokens = [ + [self.sot_token_id] + self.encode(text) + [self.eot_token_id] + for text in texts + ] + result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) + for i, tokens in enumerate(all_tokens): + if len(tokens) > context_length: + tokens = tokens[:context_length] # Truncate + tokens[-1] = self.eot_token_id + result[i, : len(tokens)] = torch.tensor(tokens) + return result diff --git a/detect_tools/sam3/sam3/model/utils/__init__.py b/detect_tools/sam3/sam3/model/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..47d98588ca232dc5bbaa771a23e5afe3e5deaa13 --- /dev/null +++ b/detect_tools/sam3/sam3/model/utils/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/detect_tools/sam3/sam3/model/utils/misc.py b/detect_tools/sam3/sam3/model/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..4f072067d5c91e34bd8a98beea9ff2f02e986189 --- /dev/null +++ b/detect_tools/sam3/sam3/model/utils/misc.py @@ -0,0 +1,77 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +from collections import defaultdict +from dataclasses import fields, is_dataclass +from typing import Any, Mapping, Protocol, runtime_checkable + +import torch + + +def _is_named_tuple(x) -> bool: + return isinstance(x, tuple) and hasattr(x, "_asdict") and hasattr(x, "_fields") + + +@runtime_checkable +class _CopyableData(Protocol): + def to(self, device: torch.device, *args: Any, **kwargs: Any): + """Copy data to the specified device""" + ... + + +def copy_data_to_device(data, device: torch.device, *args: Any, **kwargs: Any): + """Function that recursively copies data to a torch.device. 
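+    Nested lists, tuples, namedtuples, mappings (including defaultdicts) and dataclasses
+    are traversed recursively; objects exposing a `.to()` method are moved directly, and
+    anything else is returned unchanged.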
+ + Args: + data: The data to copy to device + device: The device to which the data should be copied + args: positional arguments that will be passed to the `to` call + kwargs: keyword arguments that will be passed to the `to` call + + Returns: + The data on the correct device + """ + + if _is_named_tuple(data): + return type(data)( + **copy_data_to_device(data._asdict(), device, *args, **kwargs) + ) + elif isinstance(data, (list, tuple)): + return type(data)(copy_data_to_device(e, device, *args, **kwargs) for e in data) + elif isinstance(data, defaultdict): + return type(data)( + data.default_factory, + { + k: copy_data_to_device(v, device, *args, **kwargs) + for k, v in data.items() + }, + ) + elif isinstance(data, Mapping): + return type(data)( + { + k: copy_data_to_device(v, device, *args, **kwargs) + for k, v in data.items() + } + ) + elif is_dataclass(data) and not isinstance(data, type): + new_data_class = type(data)( + **{ + field.name: copy_data_to_device( + getattr(data, field.name), device, *args, **kwargs + ) + for field in fields(data) + if field.init + } + ) + for field in fields(data): + if not field.init: + setattr( + new_data_class, + field.name, + copy_data_to_device( + getattr(data, field.name), device, *args, **kwargs + ), + ) + return new_data_class + elif isinstance(data, _CopyableData): + return data.to(device, *args, **kwargs) + return data diff --git a/detect_tools/sam3/sam3/model/utils/sam1_utils.py b/detect_tools/sam3/sam3/model/utils/sam1_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..18f0d04c91bb0e12904a58d3553b20a97b0f6f35 --- /dev/null +++ b/detect_tools/sam3/sam3/model/utils/sam1_utils.py @@ -0,0 +1,119 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import warnings + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torchvision.transforms import Normalize, Resize, ToTensor + + +# Adapted from https://github.com/facebookresearch/sam2/blob/main/sam2/utils/transforms.py +class SAM2Transforms(nn.Module): + def __init__( + self, resolution, mask_threshold, max_hole_area=0.0, max_sprinkle_area=0.0 + ): + """ + Transforms for SAM2. + """ + super().__init__() + self.resolution = resolution + self.mask_threshold = mask_threshold + self.max_hole_area = max_hole_area + self.max_sprinkle_area = max_sprinkle_area + self.mean = [0.5, 0.5, 0.5] + self.std = [0.5, 0.5, 0.5] + self.to_tensor = ToTensor() + self.transforms = torch.jit.script( + nn.Sequential( + Resize((self.resolution, self.resolution)), + Normalize(self.mean, self.std), + ) + ) + + def __call__(self, x): + x = self.to_tensor(x) + return self.transforms(x) + + def forward_batch(self, img_list): + img_batch = [self.transforms(self.to_tensor(img)) for img in img_list] + img_batch = torch.stack(img_batch, dim=0) + return img_batch + + def transform_coords( + self, coords: torch.Tensor, normalize=False, orig_hw=None + ) -> torch.Tensor: + """ + Expects a torch tensor with length 2 in the last dimension. The coordinates can be in absolute image or normalized coordinates, + If the coords are in absolute image coordinates, normalize should be set to True and original image size is required. + + Returns + Un-normalized coordinates in the range of [0, 1] which is expected by the SAM2 model. 
+ """ + if normalize: + assert orig_hw is not None + h, w = orig_hw + coords = coords.clone() + coords[..., 0] = coords[..., 0] / w + coords[..., 1] = coords[..., 1] / h + + coords = coords * self.resolution # unnormalize coords + return coords + + def transform_boxes( + self, boxes: torch.Tensor, normalize=False, orig_hw=None + ) -> torch.Tensor: + """ + Expects a tensor of shape Bx4. The coordinates can be in absolute image or normalized coordinates, + if the coords are in absolute image coordinates, normalize should be set to True and original image size is required. + """ + boxes = self.transform_coords(boxes.reshape(-1, 2, 2), normalize, orig_hw) + return boxes + + def postprocess_masks(self, masks: torch.Tensor, orig_hw) -> torch.Tensor: + """ + Perform PostProcessing on output masks. + """ + masks = masks.float() + input_masks = masks + mask_flat = masks.flatten(0, 1).unsqueeze(1) # flatten as 1-channel image + try: + from sam3.perflib.connected_components import connected_components + + if self.max_hole_area > 0: + # Holes are those connected components in background with area <= self.fill_hole_area + # (background regions are those with mask scores <= self.mask_threshold) + labels, areas = connected_components( + (mask_flat <= self.mask_threshold).to(torch.uint8) + ) + is_hole = (labels > 0) & (areas <= self.max_hole_area) + is_hole = is_hole.reshape_as(masks) + # We fill holes with a small positive mask score (10.0) to change them to foreground. + masks = torch.where(is_hole, self.mask_threshold + 10.0, masks) + + if self.max_sprinkle_area > 0: + labels, areas = connected_components( + (mask_flat > self.mask_threshold).to(torch.uint8) + ) + is_hole = (labels > 0) & (areas <= self.max_sprinkle_area) + is_hole = is_hole.reshape_as(masks) + # We fill holes with negative mask score (-10.0) to change them to background. + masks = torch.where(is_hole, self.mask_threshold - 10.0, masks) + except Exception as e: + # Skip the post-processing step if the CUDA kernel fails + warnings.warn( + f"{e}\n\nSkipping the post-processing step due to the error above. You can " + "still use SAM 3 and it's OK to ignore the error above, although some post-processing " + "functionality may be limited (which doesn't affect the results in most cases; see " + "https://github.com/facebookresearch/sam3/blob/main/INSTALL.md).", + category=UserWarning, + stacklevel=2, + ) + masks = input_masks + + masks = F.interpolate(masks, orig_hw, mode="bilinear", align_corners=False) + return masks diff --git a/detect_tools/sam3/sam3/model/utils/sam2_utils.py b/detect_tools/sam3/sam3/model/utils/sam2_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..cc3d12e8c49ba96a2d375d4b95898f88290fa6a9 --- /dev/null +++ b/detect_tools/sam3/sam3/model/utils/sam2_utils.py @@ -0,0 +1,233 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
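+
+# Video frame loading helpers (JPEG frame folders and MP4 files) used by the
+# SAM3 video predictor; frames are resized to the model input size and normalized.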
+
+import os
+from threading import Thread
+
+import numpy as np
+import torch
+from PIL import Image
+from tqdm import tqdm
+
+
+def _load_img_as_tensor(img_path, image_size):
+    img_pil = Image.open(img_path)
+    img_np = np.array(img_pil.convert("RGB").resize((image_size, image_size)))
+    if img_np.dtype == np.uint8:  # np.uint8 is expected for JPEG images
+        img_np = img_np / 255.0
+    else:
+        raise RuntimeError(f"Unknown image dtype: {img_np.dtype} on {img_path}")
+    img = torch.from_numpy(img_np).permute(2, 0, 1)
+    video_width, video_height = img_pil.size  # the original video size
+    return img, video_height, video_width
+
+
+class AsyncVideoFrameLoader:
+    """
+    A list of video frames to be loaded asynchronously without blocking session start.
+    """
+
+    def __init__(
+        self,
+        img_paths,
+        image_size,
+        offload_video_to_cpu,
+        img_mean,
+        img_std,
+        compute_device,
+    ):
+        self.img_paths = img_paths
+        self.image_size = image_size
+        self.offload_video_to_cpu = offload_video_to_cpu
+        self.img_mean = img_mean
+        self.img_std = img_std
+        # items in `self.images` will be loaded asynchronously
+        self.images = [None] * len(img_paths)
+        # catch and raise any exceptions in the async loading thread
+        self.exception = None
+        # video_height and video_width will be filled when loading the first image
+        self.video_height = None
+        self.video_width = None
+        self.compute_device = compute_device
+
+        # load the first frame to fill video_height and video_width and also
+        # to cache it (since it's most likely where the user will click)
+        self.__getitem__(0)
+
+        # load the rest of the frames asynchronously without blocking the session start
+        def _load_frames():
+            try:
+                for n in tqdm(range(len(self.images)), desc="frame loading (JPEG)"):
+                    self.__getitem__(n)
+            except Exception as e:
+                self.exception = e
+
+        self.thread = Thread(target=_load_frames, daemon=True)
+        self.thread.start()
+
+    def __getitem__(self, index):
+        if self.exception is not None:
+            raise RuntimeError("Failure in frame loading thread") from self.exception
+
+        img = self.images[index]
+        if img is not None:
+            return img
+
+        img, video_height, video_width = _load_img_as_tensor(
+            self.img_paths[index], self.image_size
+        )
+        self.video_height = video_height
+        self.video_width = video_width
+        # normalize by mean and std
+        img -= self.img_mean
+        img /= self.img_std
+        if not self.offload_video_to_cpu:
+            img = img.to(self.compute_device, non_blocking=True)
+        self.images[index] = img
+        return img
+
+    def __len__(self):
+        return len(self.images)
+
+
+def load_video_frames(
+    video_path,
+    image_size,
+    offload_video_to_cpu,
+    img_mean=(0.485, 0.456, 0.406),
+    img_std=(0.229, 0.224, 0.225),
+    async_loading_frames=False,
+    compute_device=torch.device("cuda"),
+):
+    """
+    Load the video frames from video_path. The frames are resized to image_size as in
+    the model and are loaded to GPU if offload_video_to_cpu=False. This is used by the demo.
+    """
+    is_bytes = isinstance(video_path, bytes)
+    is_str = isinstance(video_path, str)
+    is_mp4_path = is_str and os.path.splitext(video_path)[-1] in [".mp4", ".MP4"]
+    if is_bytes or is_mp4_path:
+        return load_video_frames_from_video_file(
+            video_path=video_path,
+            image_size=image_size,
+            offload_video_to_cpu=offload_video_to_cpu,
+            img_mean=img_mean,
+            img_std=img_std,
+            compute_device=compute_device,
+        )
+    elif is_str and os.path.isdir(video_path):
+        return load_video_frames_from_jpg_images(
+            video_path=video_path,
+            image_size=image_size,
+            offload_video_to_cpu=offload_video_to_cpu,
+            img_mean=img_mean,
+            img_std=img_std,
+            async_loading_frames=async_loading_frames,
+            compute_device=compute_device,
+        )
+    else:
+        raise NotImplementedError(
+            "Only MP4 videos and JPEG folders are supported at this moment"
+        )
+
+
+def load_video_frames_from_jpg_images(
+    video_path,
+    image_size,
+    offload_video_to_cpu,
+    img_mean=(0.485, 0.456, 0.406),
+    img_std=(0.229, 0.224, 0.225),
+    async_loading_frames=False,
+    compute_device=torch.device("cuda"),
+):
+    """
+    Load the video frames from a directory of JPEG files (".jpg" format).
+
+    The frames are resized to image_size x image_size and are loaded to GPU if
+    `offload_video_to_cpu` is `False` and to CPU if `offload_video_to_cpu` is `True`.
+
+    You can load the frames asynchronously by setting `async_loading_frames` to `True`.
+    """
+    if isinstance(video_path, str) and os.path.isdir(video_path):
+        jpg_folder = video_path
+    else:
+        raise NotImplementedError(
+            "Only JPEG frames are supported at this moment. For video files, you may use "
+            "ffmpeg (https://ffmpeg.org/) to extract frames into a folder of JPEG files, such as \n"
+            "```\n"
+            "ffmpeg -i <your_video>.mp4 -q:v 2 -start_number 0 <output_dir>/'%05d.jpg'\n"
+            "```\n"
+            "where `-q:v` generates high-quality JPEG frames and `-start_number 0` asks "
+            "ffmpeg to start the JPEG file from 00000.jpg."
+ ) + + frame_names = [ + p + for p in os.listdir(jpg_folder) + if os.path.splitext(p)[-1] in [".jpg", ".jpeg", ".JPG", ".JPEG"] + ] + frame_names.sort(key=lambda p: int(os.path.splitext(p)[0])) + num_frames = len(frame_names) + if num_frames == 0: + raise RuntimeError(f"no images found in {jpg_folder}") + img_paths = [os.path.join(jpg_folder, frame_name) for frame_name in frame_names] + img_mean = torch.tensor(img_mean, dtype=torch.float32)[:, None, None] + img_std = torch.tensor(img_std, dtype=torch.float32)[:, None, None] + + if async_loading_frames: + lazy_images = AsyncVideoFrameLoader( + img_paths, + image_size, + offload_video_to_cpu, + img_mean, + img_std, + compute_device, + ) + return lazy_images, lazy_images.video_height, lazy_images.video_width + + images = torch.zeros(num_frames, 3, image_size, image_size, dtype=torch.float32) + for n, img_path in enumerate(tqdm(img_paths, desc="frame loading (JPEG)")): + images[n], video_height, video_width = _load_img_as_tensor(img_path, image_size) + if not offload_video_to_cpu: + images = images.to(compute_device) + img_mean = img_mean.to(compute_device) + img_std = img_std.to(compute_device) + # normalize by mean and std + images -= img_mean + images /= img_std + return images, video_height, video_width + + +def load_video_frames_from_video_file( + video_path, + image_size, + offload_video_to_cpu, + img_mean=(0.485, 0.456, 0.406), + img_std=(0.229, 0.224, 0.225), + compute_device=torch.device("cuda"), +): + """Load the video frames from a video file.""" + import decord + + img_mean = torch.tensor(img_mean, dtype=torch.float32)[:, None, None] + img_std = torch.tensor(img_std, dtype=torch.float32)[:, None, None] + # Get the original video height and width + decord.bridge.set_bridge("torch") + video_height, video_width, _ = decord.VideoReader(video_path).next().shape + # Iterate over all frames in the video + images = [] + for frame in decord.VideoReader(video_path, width=image_size, height=image_size): + images.append(frame.permute(2, 0, 1)) + + images = torch.stack(images, dim=0).float() / 255.0 + if not offload_video_to_cpu: + images = images.to(compute_device) + img_mean = img_mean.to(compute_device) + img_std = img_std.to(compute_device) + # normalize by mean and std + images -= img_mean + images /= img_std + return images, video_height, video_width diff --git a/detect_tools/sam3/sam3/model/vitdet.py b/detect_tools/sam3/sam3/model/vitdet.py new file mode 100644 index 0000000000000000000000000000000000000000..aa56664d8f011e2d9bc9f3004e1240dda835c2ac --- /dev/null +++ b/detect_tools/sam3/sam3/model/vitdet.py @@ -0,0 +1,879 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +""" +ViTDet backbone adapted from Detectron2. +This module implements Vision Transformer (ViT) backbone for object detection. + +Rope embedding code adopted from: +1. https://github.com/meta-llama/codellama/blob/main/llama/model.py +2. https://github.com/naver-ai/rope-vit +3. 
https://github.com/lucidrains/rotary-embedding-torch +""" + +import math +from functools import partial +from typing import Callable, List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint + +try: + from timm.layers import DropPath, Mlp, trunc_normal_ +except ModuleNotFoundError: + # compatibility for older timm versions + from timm.models.layers import DropPath, Mlp, trunc_normal_ +from torch import Tensor + +from .model_misc import LayerScale + + +def init_t_xy( + end_x: int, end_y: int, scale: float = 1.0, offset: int = 0 +) -> Tuple[torch.Tensor, torch.Tensor]: + t = torch.arange(end_x * end_y, dtype=torch.float32) + t_x = (t % end_x).float() + t_y = torch.div(t, end_x, rounding_mode="floor").float() + return t_x * scale + offset, t_y * scale + offset + + +def compute_axial_cis( + dim: int, + end_x: int, + end_y: int, + theta: float = 10000.0, + scale_pos: float = 1.0, + offset: int = 0, +) -> torch.Tensor: + freqs_x = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim)) + freqs_y = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim)) + + t_x, t_y = init_t_xy(end_x, end_y, scale_pos, offset) + freqs_x = torch.outer(t_x, freqs_x) + freqs_y = torch.outer(t_y, freqs_y) + freqs_cis_x = torch.polar(torch.ones_like(freqs_x), freqs_x) + freqs_cis_y = torch.polar(torch.ones_like(freqs_y), freqs_y) + return torch.cat([freqs_cis_x, freqs_cis_y], dim=-1) + + +def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor) -> torch.Tensor: + ndim = x.ndim + assert 0 <= 1 < ndim + assert freqs_cis.shape == (x.shape[-2], x.shape[-1]) + shape = [d if i >= ndim - 2 else 1 for i, d in enumerate(x.shape)] + return freqs_cis.view(*shape) + + +def apply_rotary_enc( + xq: torch.Tensor, + xk: torch.Tensor, + freqs_cis: torch.Tensor, + repeat_freqs_k: bool = False, +) -> Tuple[torch.Tensor, torch.Tensor]: + xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2)) + xk_ = ( + torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) + if xk.shape[-2] != 0 + else None + ) + freqs_cis = reshape_for_broadcast(freqs_cis, xq_) + xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3) + if xk_ is None: + # no keys to rotate, due to dropout + return xq_out.type_as(xq).to(xq.device), xk + # repeat freqs along seq_len dim to match k seq_len + if repeat_freqs_k: + r = xk_.shape[-2] // xq_.shape[-2] + freqs_cis = freqs_cis.repeat(*([1] * (freqs_cis.ndim - 2)), r, 1) + xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3) + return xq_out.type_as(xq).to(xq.device), xk_out.type_as(xk).to(xk.device) + + +def window_partition(x: Tensor, window_size: int) -> Tuple[Tensor, Tuple[int, int]]: + """ + Partition into non-overlapping windows with padding if needed. + Args: + x (tensor): input tokens with [B, H, W, C]. + window_size (int): window size. + Returns: + windows: windows after partition with [B * num_windows, window_size, window_size, C]. 
+ (Hp, Wp): padded height and width before partition + """ + B, H, W, C = x.shape + + pad_h = (window_size - H % window_size) % window_size + pad_w = (window_size - W % window_size) % window_size + if pad_h > 0 or pad_w > 0: + x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) + Hp, Wp = H + pad_h, W + pad_w + + x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).reshape(-1, window_size, window_size, C) + return windows, (Hp, Wp) + + +def window_unpartition( + windows: Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int] +) -> Tensor: + """ + Window unpartition into original sequences and removing padding. + Args: + x (tensor): input tokens with [B * num_windows, window_size, window_size, C]. + window_size (int): window size. + pad_hw (Tuple): padded height and width (Hp, Wp). + hw (Tuple): original height and width (H, W) before padding. + Returns: + x: unpartitioned sequences with [B, H, W, C]. + """ + Hp, Wp = pad_hw + H, W = hw + B = windows.shape[0] // (Hp * Wp // window_size // window_size) + x = windows.reshape( + B, Hp // window_size, Wp // window_size, window_size, window_size, -1 + ) + x = x.permute(0, 1, 3, 2, 4, 5).reshape(B, Hp, Wp, -1) + + if Hp > H or Wp > W: + x = x[:, :H, :W, :] + return x + + +def get_rel_pos(q_size: int, k_size: int, rel_pos: Tensor) -> Tensor: + """ + Get relative positional embeddings according to the relative positions of + query and key sizes. + Args: + q_size (int): size of query q. + k_size (int): size of key k. + rel_pos (Tensor): relative position embeddings (L, C). + Returns: + Extracted positional embeddings according to relative positions. + """ + max_rel_dist = int(2 * max(q_size, k_size) - 1) + # Interpolate rel pos if needed. + if rel_pos.shape[0] != max_rel_dist: + # Interpolate rel pos. + rel_pos_resized = F.interpolate( + rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), + size=max_rel_dist, + mode="linear", + align_corners=False, + ) + rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) + else: + rel_pos_resized = rel_pos + + # Scale the coords with short length if shapes for q and k are different. + q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) + k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) + relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) + + return rel_pos_resized[relative_coords.long()] + + +def get_abs_pos( + abs_pos: Tensor, + has_cls_token: bool, + hw: Tuple[int, int], + retain_cls_token: bool = False, + tiling: bool = False, +) -> Tensor: + """ + Calculate absolute positional embeddings. If needed, resize embeddings and remove cls_token + dimension for the original embeddings. + Args: + abs_pos (Tensor): absolute positional embeddings with (1, num_position, C). + has_cls_token (bool): If true, has 1 embedding in abs_pos for cls token. + hw (Tuple): size of input image tokens. 
+ retain_cls_token: whether to retain the cls_token + tiling: whether to tile the embeddings, *instead* of interpolation (a la abs_win) + Returns: + Absolute positional embeddings after processing with shape (1, H, W, C), + if retain_cls_token is False, otherwise (1, 1+H*W, C) + """ + if retain_cls_token: + assert has_cls_token + + h, w = hw + if has_cls_token: + cls_pos = abs_pos[:, :1] + abs_pos = abs_pos[:, 1:] + + xy_num = abs_pos.shape[1] + size = int(math.sqrt(xy_num)) + assert size * size == xy_num + + if size != h or size != w: + new_abs_pos = abs_pos.reshape(1, size, size, -1).permute(0, 3, 1, 2) + if tiling: + new_abs_pos = new_abs_pos.tile( + [1, 1] + [x // y + 1 for x, y in zip((h, w), new_abs_pos.shape[2:])] + )[:, :, :h, :w] + else: + new_abs_pos = F.interpolate( + new_abs_pos, + size=(h, w), + mode="bicubic", + align_corners=False, + ) + + if not retain_cls_token: + return new_abs_pos.permute(0, 2, 3, 1) + else: + # add cls_token back, flatten spatial dims + assert has_cls_token + return torch.cat( + [cls_pos, new_abs_pos.permute(0, 2, 3, 1).reshape(1, h * w, -1)], + dim=1, + ) + + else: + if not retain_cls_token: + return abs_pos.reshape(1, h, w, -1) + else: + assert has_cls_token + return torch.cat([cls_pos, abs_pos], dim=1) + + +def concat_rel_pos( + q: Tensor, + k: Tensor, + q_hw: Tuple[int, int], + k_hw: Tuple[int, int], + rel_pos_h: Tensor, + rel_pos_w: Tensor, + rescale: bool = False, + relative_coords: Optional[Tensor] = None, +) -> Tuple[Tensor, Tensor]: + """ + Concatenate rel pos coeffs to the q & k tensors, so that qk^T is now + effectively including rel pos biases. + Args: + q (Tensor): q tensor with shape (B, L_q, C). + k (Tensor): k tensor with shape (B, L_k, C). + q_hw, k_hw: These are spatial size of q & k tensors. + rel_pos_h, rel_pos_w: These are relative pos embeddings/params of height, width. + rescale (bool): whether to rescale. e.g. for use when using sdpa, pytorch will + scale by the wrong factor due to the concat. + Returns: + q, k: But, padded so that qk^T accounts for rel pos biases + """ + q_h, q_w = q_hw + k_h, k_w = k_hw + + assert (q_h == q_w) and (k_h == k_w), "only square inputs supported" + + if relative_coords is not None: + Rh = rel_pos_h[relative_coords] + Rw = rel_pos_w[relative_coords] + else: + Rh = get_rel_pos(q_h, k_h, rel_pos_h) + Rw = get_rel_pos(q_w, k_w, rel_pos_w) + + B, _, dim = q.shape + r_q = q.reshape(B, q_h, q_w, dim) + + old_scale = dim**0.5 + new_scale = (dim + k_h + k_w) ** 0.5 if rescale else old_scale # for sdpa + # attn will be divided by new_scale, but we want to divide q by old_scale + scale_ratio = new_scale / old_scale + + rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh) * new_scale # (B, q_h, q_w, k_h) + rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw) * new_scale # (B, q_h, q_w, k_w) + + eye_h = torch.eye(k_h, dtype=q.dtype, device=q.device) + eye_w = torch.eye(k_w, dtype=q.dtype, device=q.device) + + eye_h = eye_h.view(1, k_h, 1, k_h).expand([B, k_h, k_w, k_h]) + eye_w = eye_w.view(1, 1, k_w, k_w).expand([B, k_h, k_w, k_w]) + + q = torch.cat([r_q * scale_ratio, rel_h, rel_w], dim=-1).view(B, q_h * q_w, -1) + k = torch.cat([k.view(B, k_h, k_w, -1), eye_h, eye_w], dim=-1).view( + B, k_h * k_w, -1 + ) + + return q, k + + +class PatchEmbed(nn.Module): + """ + Image to Patch Embedding. 
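+
+    The projection maps an image batch of shape (B, in_chans, H, W) to a
+    (B, H', W', embed_dim) grid of patch tokens, where H' and W' are the output
+    spatial dims of the strided convolution; forward() permutes the conv output
+    from channels-first to channels-last.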
+ """ + + def __init__( + self, + kernel_size: Tuple[int, int] = (16, 16), + stride: Tuple[int, int] = (16, 16), + padding: Tuple[int, int] = (0, 0), + in_chans: int = 3, + embed_dim: int = 768, + bias: bool = True, + ): + """ + Args: + kernel_size (Tuple): kernel size of the projection layer. + stride (Tuple): stride of the projection layer. + padding (Tuple): padding size of the projection layer. + in_chans (int): Number of input image channels. + embed_dim (int): embed_dim (int): Patch embedding dimension. + """ + super().__init__() + + self.proj = nn.Conv2d( + in_chans, + embed_dim, + kernel_size=kernel_size, + stride=stride, + padding=padding, + bias=bias, + ) + + def forward(self, x: Tensor) -> Tensor: + x = self.proj(x) + # B C H W -> B H W C + x = x.permute(0, 2, 3, 1) + return x + + +class Attention(nn.Module): + """Multi-head Attention block with relative position embeddings and 2d-rope.""" + + def __init__( + self, + dim: int, + num_heads: int = 8, + qkv_bias: bool = True, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + input_size: Optional[Tuple[int, int]] = None, + cls_token: bool = False, + use_rope: bool = False, + rope_theta: float = 10000.0, + rope_pt_size: Optional[Tuple[int, int]] = None, + rope_interp: bool = False, + ): + """ + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. + qkv_bias (bool: If True, add a learnable bias to query, key, value. + rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + input_size (int or None): Input resolution for calculating the relative positional + parameter size or rope size. + attn_type: Type of attention operation, e.g. "vanilla", "vanilla-xformer". + cls_token: whether a cls_token is present. 
+ use_rope: whether to use rope 2d (indep of use_rel_pos, as it can be used together) + rope_theta: control frequencies of rope + rope_pt_size: size of rope in previous stage of training, needed for interpolation or tiling + rope_interp: whether to interpolate (or extrapolate) rope to match input size + """ + super().__init__() + self.num_heads = num_heads + self.head_dim = dim // num_heads + self.scale = self.head_dim**-0.5 + self.cls_token = cls_token + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.proj = nn.Linear(dim, dim) + + # rel_pos embeddings and rope + self.use_rel_pos = use_rel_pos + self.input_size = input_size + + self.use_rope = use_rope + self.rope_theta = rope_theta + self.rope_pt_size = rope_pt_size + self.rope_interp = rope_interp + + # init rel_pos embeddings and rope + self._setup_rel_pos(rel_pos_zero_init) + self._setup_rope_freqs() + + def _setup_rel_pos(self, rel_pos_zero_init: bool = True) -> None: + if not self.use_rel_pos: + self.rel_pos_h = None + self.rel_pos_w = None + return + + assert self.input_size is not None + assert self.cls_token is False, "not supported" + # initialize relative positional embeddings + self.rel_pos_h = nn.Parameter( + torch.zeros(2 * self.input_size[0] - 1, self.head_dim) + ) + self.rel_pos_w = nn.Parameter( + torch.zeros(2 * self.input_size[1] - 1, self.head_dim) + ) + + if not rel_pos_zero_init: + trunc_normal_(self.rel_pos_h, std=0.02) + trunc_normal_(self.rel_pos_w, std=0.02) + + # Precompute the relative coords + H, W = self.input_size + q_coords = torch.arange(H)[:, None] + k_coords = torch.arange(W)[None, :] + relative_coords = (q_coords - k_coords) + (H - 1) + self.register_buffer("relative_coords", relative_coords.long()) + + def _setup_rope_freqs(self) -> None: + if not self.use_rope: + self.freqs_cis = None + return + + assert self.input_size is not None + # determine rope input size + if self.rope_pt_size is None: + self.rope_pt_size = self.input_size + + # initialize 2d rope freqs + self.compute_cis = partial( + compute_axial_cis, + dim=self.head_dim, + theta=self.rope_theta, + ) + + # interpolate rope + scale_pos = 1.0 + if self.rope_interp: + scale_pos = self.rope_pt_size[0] / self.input_size[0] + # get scaled freqs_cis + freqs_cis = self.compute_cis( + end_x=self.input_size[0], + end_y=self.input_size[1], + scale_pos=scale_pos, + ) + if self.cls_token: + t = torch.zeros( + self.head_dim // 2, + dtype=torch.float32, + device=freqs_cis.device, + ) + cls_freqs_cis = torch.polar(torch.ones_like(t), t)[None, :] + freqs_cis = torch.cat([cls_freqs_cis, freqs_cis], dim=0) + + self.register_buffer("freqs_cis", freqs_cis) + + def _apply_rope(self, q, k) -> Tuple[Tensor, Tensor]: + if not self.use_rope: + return q, k + + assert self.freqs_cis is not None + return apply_rotary_enc(q, k, freqs_cis=self.freqs_cis) + + def forward(self, x: Tensor) -> Tensor: + s = 1 if self.cls_token else 0 # used to exclude cls_token + if x.ndim == 4: + B, H, W, _ = x.shape + assert s == 0 # no cls_token + L = H * W + ndim = 4 + else: + assert x.ndim == 3 + B, L, _ = x.shape + ndim = 3 + H = W = math.sqrt(L - s) + + # qkv with shape (3, B, nHead, L, C) + qkv = self.qkv(x).reshape(B, L, 3, self.num_heads, -1) + # q, k, v with shape (B, nHead, L, C) + q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(0) + + # handle rope and rel pos embeddings + q, k = self._apply_rope(q, k) + if self.use_rel_pos: + q, k = concat_rel_pos( + q.flatten(0, 1), + k.flatten(0, 1), + (H, W), + x.shape[1:3], + self.rel_pos_h, + self.rel_pos_w, + rescale=True, + 
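+                # rescale=True compensates for F.scaled_dot_product_attention dividing by
+                # sqrt(head_dim + k_h + k_w) after the concat: concat_rel_pos pre-scales q so
+                # the qk^T term is still scaled by 1/sqrt(head_dim) and the relative-position
+                # bias is added unscaled.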
relative_coords=self.relative_coords, + ) + + # sdpa expects [B, nheads, H*W, C] so we transpose back + q = q.reshape(B, self.num_heads, H * W, -1) + k = k.reshape(B, self.num_heads, H * W, -1) + + x = F.scaled_dot_product_attention(q, k, v) + + if ndim == 4: + x = ( + x.view(B, self.num_heads, H, W, -1) + .permute(0, 2, 3, 1, 4) + .reshape(B, H, W, -1) + ) + else: + x = x.view(B, self.num_heads, L, -1).permute(0, 2, 1, 3).reshape(B, L, -1) + + x = self.proj(x) + + return x + + +class Block(nn.Module): + """Transformer blocks with support of window attention""" + + def __init__( + self, + dim: int, + num_heads: int, + mlp_ratio: float = 4.0, + qkv_bias: bool = True, + drop_path: float = 0.0, + norm_layer: Callable[..., nn.Module] = nn.LayerNorm, + act_layer: Callable[..., nn.Module] = nn.GELU, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + window_size: int = 0, + input_size: Optional[Tuple[int, int]] = None, + use_rope: bool = False, + rope_pt_size: Optional[Tuple[int, int]] = None, + rope_tiled: bool = False, + rope_interp: bool = False, + use_ve_rope: bool = False, + cls_token: bool = False, + dropout: float = 0.0, + init_values: Optional[float] = None, + ): + """ + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads in each ViT block. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + drop_path (float): Stochastic depth rate. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + use_rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + window_size (int): Window size for window attention blocks. If it equals 0, then not + use window attention. + input_size (int or None): Input resolution for calculating the relative positional + parameter size. + dropout (float): Dropout rate. + cls_token: whether a cls_token is present. + use_rope: whether to use rope 2d (indep of use_rel_pos, as it can be used together) + rope_pt_size: size of rope in previous stage of training, needed for interpolation or tiling + rope_interp: whether to interpolate (or extrapolate) rope to match target input size, + expected to specify source size as rope_pt_size. 
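+        Note: when `window_size` > 0, forward() partitions the (B, H, W, C) input into
+        non-overlapping window_size x window_size windows (padding if needed) before
+        attention and reverses the partition afterwards, so self-attention is local
+        to each window.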
+ """ + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + use_rel_pos=use_rel_pos, + rel_pos_zero_init=rel_pos_zero_init, + input_size=input_size if window_size == 0 else (window_size, window_size), + use_rope=use_rope, + rope_pt_size=rope_pt_size, + rope_interp=rope_interp, + cls_token=cls_token, + ) + self.ls1 = ( + LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + ) + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + self.norm2 = norm_layer(dim) + self.mlp = Mlp( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + drop=(dropout, 0.0), + ) + self.ls2 = ( + LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + ) + self.dropout = nn.Dropout(dropout) + self.window_size = window_size + + def forward(self, x: Tensor) -> Tensor: + shortcut = x + x = self.norm1(x) + # Window partition + if self.window_size > 0: + H, W = x.shape[1], x.shape[2] + x, pad_hw = window_partition(x, self.window_size) + + x = self.ls1(self.attn(x)) + # Reverse window partition + if self.window_size > 0: + x = window_unpartition(x, self.window_size, pad_hw, (H, W)) + + x = shortcut + self.dropout(self.drop_path(x)) + x = x + self.dropout(self.drop_path(self.ls2(self.mlp(self.norm2(x))))) + + return x + + +class ViT(nn.Module): + """ + This module implements Vision Transformer (ViT) backbone in :paper:`vitdet`. + "Exploring Plain Vision Transformer Backbones for Object Detection", + https://arxiv.org/abs/2203.16527 + """ + + def __init__( + self, + img_size: int = 1024, + patch_size: int = 16, + in_chans: int = 3, + embed_dim: int = 768, + depth: int = 12, + num_heads: int = 12, + mlp_ratio: float = 4.0, + qkv_bias: bool = True, + drop_path_rate: float = 0.0, + norm_layer: Union[Callable[..., nn.Module], str] = "LayerNorm", + act_layer: Callable[..., nn.Module] = nn.GELU, + use_abs_pos: bool = True, + tile_abs_pos: bool = True, + rel_pos_blocks: Union[Tuple[int, ...], bool] = (2, 5, 8, 11), + rel_pos_zero_init: bool = True, + window_size: int = 14, + global_att_blocks: Tuple[int, ...] = (2, 5, 8, 11), + use_rope: bool = False, + rope_pt_size: Optional[int] = None, + use_interp_rope: bool = False, + pretrain_img_size: int = 224, + pretrain_use_cls_token: bool = True, + retain_cls_token: bool = True, + dropout: float = 0.0, + return_interm_layers: bool = False, + init_values: Optional[float] = None, # for layerscale + ln_pre: bool = False, + ln_post: bool = False, + bias_patch_embed: bool = True, + compile_mode: Optional[str] = None, + use_act_checkpoint: bool = True, + ): + """ + Args: + img_size (int): Input image size. Only relevant for rel pos or rope. + patch_size (int): Patch size. + in_chans (int): Number of input image channels. + embed_dim (int): Patch embedding dimension. + depth (int): Depth of ViT. + num_heads (int): Number of attention heads in each ViT block. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + drop_path_rate (float): Stochastic depth rate. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + use_abs_pos (bool): If True, use absolute positional embeddings. + tile_abs_pos (bool): If True, tile absolute positional embeddings instead of interpolation. + rel_pos_blocks (list): Blocks which have rel pos embeddings. 
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + window_size (int): Window size for window attention blocks. + global_att_blocks (list): Indexes for blocks using global attention (other blocks use window attention). + use_rope (bool): whether to use rope 2d (indep of rel_pos_blocks, as it can be used together). + rope_pt_size (int): size of rope in previous stage of training, needed for interpolation or tiling. + use_interp_rope: whether to interpolate (or extrapolate) rope to match target input size, + expected to specify source size as rope_pt_size. + use_act_checkpoint (bool): If True, use activation checkpointing. + pretrain_img_size (int): input image size for pretraining models. + pretrain_use_cls_token (bool): If True, pretraining models use class token. + retain_cls_token: whether cls_token should be retained. + dropout (float): Dropout rate. Applied in residual blocks of attn, mlp and inside the mlp. + + return_interm_layers (bool): Whether to return intermediate layers (all global attention blocks). + init_values: layer scale init, None for no layer scale. + + ln_pre (bool): If True, apply layer norm before transformer blocks. + ln_post (bool): If True, apply layer norm after transformer blocks. + bias_patch_embed (bool): bias in conv for patch embed? + compile_mode (str): mode to compile the forward + """ + super().__init__() + self.pretrain_use_cls_token = pretrain_use_cls_token + + window_block_indexes = [i for i in range(depth) if i not in global_att_blocks] + self.full_attn_ids = list(global_att_blocks) + self.rel_pos_blocks = [False] * depth + if isinstance(rel_pos_blocks, bool) and rel_pos_blocks: + self.rel_pos_blocks = [True] * depth + else: + for i in rel_pos_blocks: + self.rel_pos_blocks[i] = True + + self.retain_cls_token = retain_cls_token + if self.retain_cls_token: + assert pretrain_use_cls_token + assert ( + len(window_block_indexes) == 0 + ), "windowing not supported with cls token" + + assert sum(self.rel_pos_blocks) == 0, "rel pos not supported with cls token" + + scale = embed_dim**-0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(1, 1, embed_dim)) + + if isinstance(norm_layer, str): + norm_layer = partial(getattr(nn, norm_layer), eps=1e-5) + + self.patch_embed = PatchEmbed( + kernel_size=(patch_size, patch_size), + stride=(patch_size, patch_size), + in_chans=in_chans, + embed_dim=embed_dim, + bias=bias_patch_embed, + ) + + # Handle absolute positional embedding + self.tile_abs_pos = tile_abs_pos + self.use_abs_pos = use_abs_pos + if self.tile_abs_pos: + assert self.use_abs_pos + + if self.use_abs_pos: + # Initialize absolute positional embedding with pretrain image size. 
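+            # e.g. with the SAM3 defaults in model_builder.py (pretrain_img_size=336,
+            # patch_size=14, pretrain_use_cls_token=True) this gives 24 * 24 = 576 patch
+            # positions plus one cls-token position, i.e. a (1, 577, embed_dim) parameter;
+            # get_abs_pos later resizes or tiles it to the actual feature grid.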
+ num_patches = (pretrain_img_size // patch_size) * ( + pretrain_img_size // patch_size + ) + num_positions = (num_patches + 1) if pretrain_use_cls_token else num_patches + self.pos_embed = nn.Parameter(torch.zeros(1, num_positions, embed_dim)) + else: + self.pos_embed = None + + # stochastic depth decay rule + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] + + self.blocks = nn.ModuleList() + cur_stage = 1 + for i in range(depth): + block = Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop_path=dpr[i], + norm_layer=norm_layer, + act_layer=act_layer, + use_rel_pos=self.rel_pos_blocks[i], + rel_pos_zero_init=rel_pos_zero_init, + window_size=window_size if i in window_block_indexes else 0, + input_size=(img_size // patch_size, img_size // patch_size), + use_rope=use_rope, + rope_pt_size=( + (window_size, window_size) + if rope_pt_size is None + else (rope_pt_size, rope_pt_size) + ), + rope_interp=use_interp_rope, + cls_token=self.retain_cls_token, + dropout=dropout, + init_values=init_values, + ) + + if i not in window_block_indexes: + cur_stage += 1 + + self.use_act_checkpoint = use_act_checkpoint + + self.blocks.append(block) + + self.return_interm_layers = return_interm_layers + self.channel_list = ( + [embed_dim] * len(self.full_attn_ids) + if return_interm_layers + else [embed_dim] + ) + + if self.pos_embed is not None: + trunc_normal_(self.pos_embed, std=0.02) + + self.ln_pre = norm_layer(embed_dim) if ln_pre else nn.Identity() + self.ln_post = norm_layer(embed_dim) if ln_post else nn.Identity() + + self.apply(self._init_weights) + + if compile_mode is not None: + self.forward = torch.compile( + self.forward, mode=compile_mode, fullgraph=True + ) + if self.use_act_checkpoint and self.training: + torch._dynamo.config.optimize_ddp = False + + def _init_weights(self, m: nn.Module) -> None: + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def forward(self, x: torch.Tensor) -> List[torch.Tensor]: + x = self.patch_embed(x) + h, w = x.shape[1], x.shape[2] + + s = 0 + if self.retain_cls_token: + # If cls_token is retained, we don't + # maintain spatial shape + x = torch.cat([self.class_embedding, x.flatten(1, 2)], dim=1) + s = 1 + + if self.pos_embed is not None: + x = x + get_abs_pos( + self.pos_embed, + self.pretrain_use_cls_token, + (h, w), + self.retain_cls_token, + tiling=self.tile_abs_pos, + ) + + x = self.ln_pre(x) + + outputs = [] + for i, blk in enumerate(self.blocks): + if self.use_act_checkpoint and self.training: + x = checkpoint.checkpoint(blk, x, use_reentrant=False) + else: + x = blk(x) + if (i == self.full_attn_ids[-1]) or ( + self.return_interm_layers and i in self.full_attn_ids + ): + if i == self.full_attn_ids[-1]: + x = self.ln_post(x) + + feats = x[:, s:] + if feats.ndim == 4: + feats = feats.permute(0, 3, 1, 2) + else: + assert feats.ndim == 3 + h = w = math.sqrt(feats.shape[1]) + feats = feats.reshape( + feats.shape[0], h, w, feats.shape[-1] + ).permute(0, 3, 1, 2) + + outputs.append(feats) + + return outputs + + def get_layer_id(self, layer_name: str) -> int: + # https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33 + num_layers = self.get_num_layers() + + if layer_name.find("rel_pos") != -1: + return num_layers + 1 + elif layer_name.find("ln_pre") != -1: + return 0 + elif 
layer_name.find("pos_embed") != -1 or layer_name.find("cls_token") != -1: + return 0 + elif layer_name.find("patch_embed") != -1: + return 0 + elif layer_name.find("blocks") != -1: + return int(layer_name.split("blocks")[1].split(".")[1]) + 1 + else: + return num_layers + 1 + + def get_num_layers(self) -> int: + return len(self.blocks) diff --git a/detect_tools/sam3/sam3/model/vl_combiner.py b/detect_tools/sam3/sam3/model/vl_combiner.py new file mode 100644 index 0000000000000000000000000000000000000000..43bc7bd5e8fda4f4ed1e96cd7931b254f013616c --- /dev/null +++ b/detect_tools/sam3/sam3/model/vl_combiner.py @@ -0,0 +1,176 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +"""Provides utility to combine a vision backbone with a language backbone.""" + +from copy import copy +from typing import List, Optional + +import torch +import torch.nn as nn + +from torch.nn.attention import sdpa_kernel, SDPBackend + +from .act_ckpt_utils import activation_ckpt_wrapper +from .necks import Sam3DualViTDetNeck + + +class SAM3VLBackbone(nn.Module): + """This backbone combines a vision backbone and a language backbone without fusion. + As such it is more of a convenience wrapper to handle the two backbones together. + + It adds support for activation checkpointing and compilation. + """ + + def __init__( + self, + visual: Sam3DualViTDetNeck, + text, + compile_visual: bool = False, + act_ckpt_whole_vision_backbone: bool = False, + act_ckpt_whole_language_backbone: bool = False, + scalp=0, + ): + """Initialize the backbone combiner. + + :param visual: The vision backbone to use + :param text: The text encoder to use + """ + super().__init__() + self.vision_backbone: Sam3DualViTDetNeck = ( + torch.compile(visual) if compile_visual else visual + ) + self.language_backbone = text + self.scalp = scalp + # allow running activation checkpointing on the entire vision and language backbones + self.act_ckpt_whole_vision_backbone = act_ckpt_whole_vision_backbone + self.act_ckpt_whole_language_backbone = act_ckpt_whole_language_backbone + + def forward( + self, + samples: torch.Tensor, + captions: List[str], + input_boxes: Optional[torch.Tensor] = None, + additional_text: Optional[List[str]] = None, + ): + """Forward pass of the backbone combiner. 
+ + :param samples: The input images + :param captions: The input captions + :param input_boxes: If the text contains place-holders for boxes, this + parameter contains the tensor containing their spatial features + :param additional_text: This can be used to encode some additional text + (different from the captions) in the same forward of the backbone + :return: Output dictionary with the following keys: + - vision_features: The output of the vision backbone + - language_features: The output of the language backbone + - language_mask: The attention mask of the language backbone + - vision_pos_enc: The positional encoding of the vision backbone + - (optional) additional_text_features: The output of the language + backbone for the additional text + - (optional) additional_text_mask: The attention mask of the + language backbone for the additional text + """ + output = self.forward_image(samples) + device = output["vision_features"].device + output.update(self.forward_text(captions, input_boxes, additional_text, device)) + return output + + def forward_image(self, samples: torch.Tensor): + return activation_ckpt_wrapper(self._forward_image_no_act_ckpt)( + samples=samples, + act_ckpt_enable=self.act_ckpt_whole_vision_backbone and self.training, + ) + + def _forward_image_no_act_ckpt(self, samples): + # Forward through backbone + sam3_features, sam3_pos, sam2_features, sam2_pos = self.vision_backbone.forward( + samples + ) + if self.scalp > 0: + # Discard the lowest resolution features + sam3_features, sam3_pos = ( + sam3_features[: -self.scalp], + sam3_pos[: -self.scalp], + ) + if sam2_features is not None and sam2_pos is not None: + sam2_features, sam2_pos = ( + sam2_features[: -self.scalp], + sam2_pos[: -self.scalp], + ) + + sam2_output = None + + if sam2_features is not None and sam2_pos is not None: + sam2_src = sam2_features[-1] + sam2_output = { + "vision_features": sam2_src, + "vision_pos_enc": sam2_pos, + "backbone_fpn": sam2_features, + } + + sam3_src = sam3_features[-1] + output = { + "vision_features": sam3_src, + "vision_pos_enc": sam3_pos, + "backbone_fpn": sam3_features, + "sam2_backbone_out": sam2_output, + } + + return output + + def forward_text( + self, captions, input_boxes=None, additional_text=None, device="cuda" + ): + return activation_ckpt_wrapper(self._forward_text_no_ack_ckpt)( + captions=captions, + input_boxes=input_boxes, + additional_text=additional_text, + device=device, + act_ckpt_enable=self.act_ckpt_whole_language_backbone and self.training, + ) + + def _forward_text_no_ack_ckpt( + self, + captions, + input_boxes=None, + additional_text=None, + device="cuda", + ): + output = {} + + # Forward through text_encoder + text_to_encode = copy(captions) + if additional_text is not None: + # if there are additional_text, we piggy-back them into this forward. 
+ # They'll be used later for output alignment + text_to_encode += additional_text + + sdpa_context = sdpa_kernel( + [ + SDPBackend.MATH, + SDPBackend.EFFICIENT_ATTENTION, + SDPBackend.FLASH_ATTENTION, + ] + ) + + with sdpa_context: + text_attention_mask, text_memory, text_embeds = self.language_backbone( + text_to_encode, input_boxes, device=device + ) + + if additional_text is not None: + output["additional_text_features"] = text_memory[:, -len(additional_text) :] + output["additional_text_mask"] = text_attention_mask[ + -len(additional_text) : + ] + + text_memory = text_memory[:, : len(captions)] + text_attention_mask = text_attention_mask[: len(captions)] + text_embeds = text_embeds[:, : len(captions)] + output["language_features"] = text_memory + output["language_mask"] = text_attention_mask + output["language_embeds"] = ( + text_embeds # Text embeddings before forward to the encoder + ) + + return output diff --git a/detect_tools/sam3/sam3/model_builder.py b/detect_tools/sam3/sam3/model_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..058bbec3c2f0bbba6df0aefb5d513ff0d8aa9937 --- /dev/null +++ b/detect_tools/sam3/sam3/model_builder.py @@ -0,0 +1,793 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import os +from typing import Optional + +import torch +import torch.nn as nn +from huggingface_hub import hf_hub_download +from iopath.common.file_io import g_pathmgr +from sam3.model.decoder import ( + TransformerDecoder, + TransformerDecoderLayer, + TransformerDecoderLayerv2, + TransformerEncoderCrossAttention, +) +from sam3.model.encoder import TransformerEncoderFusion, TransformerEncoderLayer +from sam3.model.geometry_encoders import SequenceGeometryEncoder +from sam3.model.maskformer_segmentation import PixelDecoder, UniversalSegmentationHead +from sam3.model.memory import ( + CXBlock, + SimpleFuser, + SimpleMaskDownSampler, + SimpleMaskEncoder, +) +from sam3.model.model_misc import ( + DotProductScoring, + MLP, + MultiheadAttentionWrapper as MultiheadAttention, + TransformerWrapper, +) +from sam3.model.necks import Sam3DualViTDetNeck +from sam3.model.position_encoding import PositionEmbeddingSine +from sam3.model.sam1_task_predictor import SAM3InteractiveImagePredictor +from sam3.model.sam3_image import Sam3Image, Sam3ImageOnVideoMultiGPU +from sam3.model.sam3_tracking_predictor import Sam3TrackerPredictor +from sam3.model.sam3_video_inference import Sam3VideoInferenceWithInstanceInteractivity +from sam3.model.sam3_video_predictor import Sam3VideoPredictorMultiGPU +from sam3.model.text_encoder_ve import VETextEncoder +from sam3.model.tokenizer_ve import SimpleTokenizer +from sam3.model.vitdet import ViT +from sam3.model.vl_combiner import SAM3VLBackbone +from sam3.sam.transformer import RoPEAttention + + +# Setup TensorFloat-32 for Ampere GPUs if available +def _setup_tf32() -> None: + """Enable TensorFloat-32 for Ampere GPUs if available.""" + if torch.cuda.is_available(): + device_props = torch.cuda.get_device_properties(0) + if device_props.major >= 8: + torch.backends.cuda.matmul.allow_tf32 = True + torch.backends.cudnn.allow_tf32 = True + + +_setup_tf32() + + +def _create_position_encoding(precompute_resolution=None): + """Create position encoding for visual backbone.""" + return PositionEmbeddingSine( + num_pos_feats=256, + normalize=True, + scale=None, + temperature=10000, + precompute_resolution=precompute_resolution, + ) + + +def _create_vit_backbone(compile_mode=None): + """Create ViT backbone for visual feature 
extraction.""" + return ViT( + img_size=1008, + pretrain_img_size=336, + patch_size=14, + embed_dim=1024, + depth=32, + num_heads=16, + mlp_ratio=4.625, + norm_layer="LayerNorm", + drop_path_rate=0.1, + qkv_bias=True, + use_abs_pos=True, + tile_abs_pos=True, + global_att_blocks=(7, 15, 23, 31), + rel_pos_blocks=(), + use_rope=True, + use_interp_rope=True, + window_size=24, + pretrain_use_cls_token=True, + retain_cls_token=False, + ln_pre=True, + ln_post=False, + return_interm_layers=False, + bias_patch_embed=False, + compile_mode=compile_mode, + ) + + +def _create_vit_neck(position_encoding, vit_backbone, enable_inst_interactivity=False): + """Create ViT neck for feature pyramid.""" + return Sam3DualViTDetNeck( + position_encoding=position_encoding, + d_model=256, + scale_factors=[4.0, 2.0, 1.0, 0.5], + trunk=vit_backbone, + add_sam2_neck=enable_inst_interactivity, + ) + + +def _create_vl_backbone(vit_neck, text_encoder): + """Create visual-language backbone.""" + return SAM3VLBackbone(visual=vit_neck, text=text_encoder, scalp=1) + + +def _create_transformer_encoder() -> TransformerEncoderFusion: + """Create transformer encoder with its layer.""" + encoder_layer = TransformerEncoderLayer( + activation="relu", + d_model=256, + dim_feedforward=2048, + dropout=0.1, + pos_enc_at_attn=True, + pos_enc_at_cross_attn_keys=False, + pos_enc_at_cross_attn_queries=False, + pre_norm=True, + self_attention=MultiheadAttention( + num_heads=8, + dropout=0.1, + embed_dim=256, + batch_first=True, + ), + cross_attention=MultiheadAttention( + num_heads=8, + dropout=0.1, + embed_dim=256, + batch_first=True, + ), + ) + + encoder = TransformerEncoderFusion( + layer=encoder_layer, + num_layers=6, + d_model=256, + num_feature_levels=1, + frozen=False, + use_act_checkpoint=True, + add_pooled_text_to_img_feat=False, + pool_text_with_mask=True, + ) + return encoder + + +def _create_transformer_decoder() -> TransformerDecoder: + """Create transformer decoder with its layer.""" + decoder_layer = TransformerDecoderLayer( + activation="relu", + d_model=256, + dim_feedforward=2048, + dropout=0.1, + cross_attention=MultiheadAttention( + num_heads=8, + dropout=0.1, + embed_dim=256, + ), + n_heads=8, + use_text_cross_attention=True, + ) + + decoder = TransformerDecoder( + layer=decoder_layer, + num_layers=6, + num_queries=200, + return_intermediate=True, + box_refine=True, + num_o2m_queries=0, + dac=True, + boxRPB="log", + d_model=256, + frozen=False, + interaction_layer=None, + dac_use_selfatt_ln=True, + resolution=1008, + stride=14, + use_act_checkpoint=True, + presence_token=True, + ) + return decoder + + +def _create_dot_product_scoring(): + """Create dot product scoring module.""" + prompt_mlp = MLP( + input_dim=256, + hidden_dim=2048, + output_dim=256, + num_layers=2, + dropout=0.1, + residual=True, + out_norm=nn.LayerNorm(256), + ) + return DotProductScoring(d_model=256, d_proj=256, prompt_mlp=prompt_mlp) + + +def _create_segmentation_head(compile_mode=None): + """Create segmentation head with pixel decoder.""" + pixel_decoder = PixelDecoder( + num_upsampling_stages=3, + interpolation_mode="nearest", + hidden_dim=256, + compile_mode=compile_mode, + ) + + cross_attend_prompt = MultiheadAttention( + num_heads=8, + dropout=0, + embed_dim=256, + ) + + segmentation_head = UniversalSegmentationHead( + hidden_dim=256, + upsampling_stages=3, + aux_masks=False, + presence_head=False, + dot_product_scorer=None, + act_ckpt=True, + cross_attend_prompt=cross_attend_prompt, + pixel_decoder=pixel_decoder, + ) + return 
segmentation_head + + +def _create_geometry_encoder(): + """Create geometry encoder with all its components.""" + # Create position encoding for geometry encoder + geo_pos_enc = _create_position_encoding() + # Create CX block for fuser + cx_block = CXBlock( + dim=256, + kernel_size=7, + padding=3, + layer_scale_init_value=1.0e-06, + use_dwconv=True, + ) + # Create geometry encoder layer + geo_layer = TransformerEncoderLayer( + activation="relu", + d_model=256, + dim_feedforward=2048, + dropout=0.1, + pos_enc_at_attn=False, + pre_norm=True, + self_attention=MultiheadAttention( + num_heads=8, + dropout=0.1, + embed_dim=256, + batch_first=False, + ), + pos_enc_at_cross_attn_queries=False, + pos_enc_at_cross_attn_keys=True, + cross_attention=MultiheadAttention( + num_heads=8, + dropout=0.1, + embed_dim=256, + batch_first=False, + ), + ) + + # Create geometry encoder + input_geometry_encoder = SequenceGeometryEncoder( + pos_enc=geo_pos_enc, + encode_boxes_as_points=False, + points_direct_project=True, + points_pool=True, + points_pos_enc=True, + boxes_direct_project=True, + boxes_pool=True, + boxes_pos_enc=True, + d_model=256, + num_layers=3, + layer=geo_layer, + use_act_ckpt=True, + add_cls=True, + add_post_encode_proj=True, + ) + return input_geometry_encoder + + +def _create_sam3_model( + backbone, + transformer, + input_geometry_encoder, + segmentation_head, + dot_prod_scoring, + inst_interactive_predictor, + eval_mode, +): + """Create the SAM3 image model.""" + common_params = { + "backbone": backbone, + "transformer": transformer, + "input_geometry_encoder": input_geometry_encoder, + "segmentation_head": segmentation_head, + "num_feature_levels": 1, + "o2m_mask_predict": True, + "dot_prod_scoring": dot_prod_scoring, + "use_instance_query": False, + "multimask_output": True, + "inst_interactive_predictor": inst_interactive_predictor, + } + + matcher = None + if not eval_mode: + from sam3.train.matcher import BinaryHungarianMatcherV2 + + matcher = BinaryHungarianMatcherV2( + focal=True, + cost_class=2.0, + cost_bbox=5.0, + cost_giou=2.0, + alpha=0.25, + gamma=2, + stable=False, + ) + common_params["matcher"] = matcher + model = Sam3Image(**common_params) + + return model + + +def _create_tracker_maskmem_backbone(): + """Create the SAM3 Tracker memory encoder.""" + # Position encoding for mask memory backbone + position_encoding = PositionEmbeddingSine( + num_pos_feats=64, + normalize=True, + scale=None, + temperature=10000, + precompute_resolution=1008, + ) + + # Mask processing components + mask_downsampler = SimpleMaskDownSampler( + kernel_size=3, stride=2, padding=1, interpol_size=[1152, 1152] + ) + + cx_block_layer = CXBlock( + dim=256, + kernel_size=7, + padding=3, + layer_scale_init_value=1.0e-06, + use_dwconv=True, + ) + + fuser = SimpleFuser(layer=cx_block_layer, num_layers=2) + + maskmem_backbone = SimpleMaskEncoder( + out_dim=64, + position_encoding=position_encoding, + mask_downsampler=mask_downsampler, + fuser=fuser, + ) + + return maskmem_backbone + + +def _create_tracker_transformer(): + """Create the SAM3 Tracker transformer components.""" + # Self attention + self_attention = RoPEAttention( + embedding_dim=256, + num_heads=1, + downsample_rate=1, + dropout=0.1, + rope_theta=10000.0, + feat_sizes=[72, 72], + use_fa3=False, + use_rope_real=False, + ) + + # Cross attention + cross_attention = RoPEAttention( + embedding_dim=256, + num_heads=1, + downsample_rate=1, + dropout=0.1, + kv_in_dim=64, + rope_theta=10000.0, + feat_sizes=[72, 72], + rope_k_repeat=True, + use_fa3=False, + 
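+        # kv_in_dim=64 lines up with the 64-channel memory features from the mask
+        # memory encoder (SimpleMaskEncoder with out_dim=64 above), while queries
+        # remain in the 256-dim model space.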
use_rope_real=False, + ) + + # Encoder layer + encoder_layer = TransformerDecoderLayerv2( + cross_attention_first=False, + activation="relu", + dim_feedforward=2048, + dropout=0.1, + pos_enc_at_attn=False, + pre_norm=True, + self_attention=self_attention, + d_model=256, + pos_enc_at_cross_attn_keys=True, + pos_enc_at_cross_attn_queries=False, + cross_attention=cross_attention, + ) + + # Encoder + encoder = TransformerEncoderCrossAttention( + remove_cross_attention_layers=[], + batch_first=True, + d_model=256, + frozen=False, + pos_enc_at_input=True, + layer=encoder_layer, + num_layers=4, + use_act_checkpoint=False, + ) + + # Transformer wrapper + transformer = TransformerWrapper( + encoder=encoder, + decoder=None, + d_model=256, + ) + + return transformer + + +def build_tracker( + apply_temporal_disambiguation: bool, with_backbone: bool = False, compile_mode=None +) -> Sam3TrackerPredictor: + """ + Build the SAM3 Tracker module for video tracking. + + Returns: + Sam3TrackerPredictor: Wrapped SAM3 Tracker module + """ + + # Create model components + maskmem_backbone = _create_tracker_maskmem_backbone() + transformer = _create_tracker_transformer() + backbone = None + if with_backbone: + vision_backbone = _create_vision_backbone(compile_mode=compile_mode) + backbone = SAM3VLBackbone(scalp=1, visual=vision_backbone, text=None) + # Create the Tracker module + model = Sam3TrackerPredictor( + image_size=1008, + num_maskmem=7, + backbone=backbone, + backbone_stride=14, + transformer=transformer, + maskmem_backbone=maskmem_backbone, + # SAM parameters + multimask_output_in_sam=True, + # Evaluation + forward_backbone_per_frame_for_eval=True, + trim_past_non_cond_mem_for_eval=False, + # Multimask + multimask_output_for_tracking=True, + multimask_min_pt_num=0, + multimask_max_pt_num=1, + # Additional settings + always_start_from_first_ann_frame=False, + # Mask overlap + non_overlap_masks_for_mem_enc=False, + non_overlap_masks_for_output=False, + max_cond_frames_in_attn=4, + offload_output_to_cpu_for_eval=False, + # SAM decoder settings + sam_mask_decoder_extra_args={ + "dynamic_multimask_via_stability": True, + "dynamic_multimask_stability_delta": 0.05, + "dynamic_multimask_stability_thresh": 0.98, + }, + clear_non_cond_mem_around_input=True, + fill_hole_area=0, + use_memory_selection=apply_temporal_disambiguation, + ) + + return model + + +def _create_text_encoder(bpe_path: str) -> VETextEncoder: + """Create SAM3 text encoder.""" + tokenizer = SimpleTokenizer(bpe_path=bpe_path) + return VETextEncoder( + tokenizer=tokenizer, + d_model=256, + width=1024, + heads=16, + layers=24, + ) + + +def _create_vision_backbone( + compile_mode=None, enable_inst_interactivity=True +) -> Sam3DualViTDetNeck: + """Create SAM3 visual backbone with ViT and neck.""" + # Position encoding + position_encoding = _create_position_encoding(precompute_resolution=1008) + # ViT backbone + vit_backbone: ViT = _create_vit_backbone(compile_mode=compile_mode) + vit_neck: Sam3DualViTDetNeck = _create_vit_neck( + position_encoding, + vit_backbone, + enable_inst_interactivity=enable_inst_interactivity, + ) + # Visual neck + return vit_neck + + +def _create_sam3_transformer(has_presence_token: bool = True) -> TransformerWrapper: + """Create SAM3 transformer encoder and decoder.""" + encoder: TransformerEncoderFusion = _create_transformer_encoder() + decoder: TransformerDecoder = _create_transformer_decoder() + + return TransformerWrapper(encoder=encoder, decoder=decoder, d_model=256) + + +def _load_checkpoint(model, checkpoint_path): + 
"""Load model checkpoint from file.""" + with g_pathmgr.open(checkpoint_path, "rb") as f: + ckpt = torch.load(f, map_location="cpu", weights_only=True) + if "model" in ckpt and isinstance(ckpt["model"], dict): + ckpt = ckpt["model"] + sam3_image_ckpt = { + k.replace("detector.", ""): v for k, v in ckpt.items() if "detector" in k + } + if model.inst_interactive_predictor is not None: + sam3_image_ckpt.update( + { + k.replace("tracker.", "inst_interactive_predictor.model."): v + for k, v in ckpt.items() + if "tracker" in k + } + ) + missing_keys, _ = model.load_state_dict(sam3_image_ckpt, strict=False) + if len(missing_keys) > 0: + print( + f"loaded {checkpoint_path} and found " + f"missing and/or unexpected keys:\n{missing_keys=}" + ) + + +def _setup_device_and_mode(model, device, eval_mode): + """Setup model device and evaluation mode.""" + if device == "cuda": + model = model.cuda() + if eval_mode: + model.eval() + return model + + +def build_sam3_image_model( + bpe_path=None, + device="cuda" if torch.cuda.is_available() else "cpu", + eval_mode=True, + checkpoint_path=None, + load_from_HF=True, + enable_segmentation=True, + enable_inst_interactivity=False, + compile=False, +): + """ + Build SAM3 image model + + Args: + bpe_path: Path to the BPE tokenizer vocabulary + device: Device to load the model on ('cuda' or 'cpu') + eval_mode: Whether to set the model to evaluation mode + checkpoint_path: Optional path to model checkpoint + enable_segmentation: Whether to enable segmentation head + enable_inst_interactivity: Whether to enable instance interactivity (SAM 1 task) + compile_mode: To enable compilation, set to "default" + + Returns: + A SAM3 image model + """ + if bpe_path is None: + bpe_path = os.path.join( + os.path.dirname(__file__), "..", "assets", "bpe_simple_vocab_16e6.txt.gz" + ) + # Create visual components + compile_mode = "default" if compile else None + vision_encoder = _create_vision_backbone( + compile_mode=compile_mode, enable_inst_interactivity=enable_inst_interactivity + ) + + # Create text components + text_encoder = _create_text_encoder(bpe_path) + + # Create visual-language backbone + backbone = _create_vl_backbone(vision_encoder, text_encoder) + + # Create transformer components + transformer = _create_sam3_transformer() + + # Create dot product scoring + dot_prod_scoring = _create_dot_product_scoring() + + # Create segmentation head if enabled + segmentation_head = ( + _create_segmentation_head(compile_mode=compile_mode) + if enable_segmentation + else None + ) + + # Create geometry encoder + input_geometry_encoder = _create_geometry_encoder() + if enable_inst_interactivity: + sam3_pvs_base = build_tracker(apply_temporal_disambiguation=False) + inst_predictor = SAM3InteractiveImagePredictor(sam3_pvs_base) + else: + inst_predictor = None + # Create the SAM3 model + model = _create_sam3_model( + backbone, + transformer, + input_geometry_encoder, + segmentation_head, + dot_prod_scoring, + inst_predictor, + eval_mode, + ) + if load_from_HF and checkpoint_path is None: + checkpoint_path = download_ckpt_from_hf() + # Load checkpoint if provided + if checkpoint_path is not None: + _load_checkpoint(model, checkpoint_path) + + # Setup device and mode + model = _setup_device_and_mode(model, device, eval_mode) + + return model + + +def download_ckpt_from_hf(): + SAM3_MODEL_ID = "facebook/sam3" + SAM3_CKPT_NAME = "sam3.pt" + SAM3_CFG_NAME = "config.json" + _ = hf_hub_download(repo_id=SAM3_MODEL_ID, filename=SAM3_CFG_NAME) + checkpoint_path = 
hf_hub_download(repo_id=SAM3_MODEL_ID, filename=SAM3_CKPT_NAME) + return checkpoint_path + + +def build_sam3_video_model( + checkpoint_path: Optional[str] = None, + load_from_HF=True, + bpe_path: Optional[str] = None, + has_presence_token: bool = True, + geo_encoder_use_img_cross_attn: bool = True, + strict_state_dict_loading: bool = True, + apply_temporal_disambiguation: bool = True, + device="cuda" if torch.cuda.is_available() else "cpu", + compile=False, +) -> Sam3VideoInferenceWithInstanceInteractivity: + """ + Build SAM3 dense tracking model. + + Args: + checkpoint_path: Optional path to checkpoint file + bpe_path: Path to the BPE tokenizer file + + Returns: + Sam3VideoInferenceWithInstanceInteractivity: The instantiated dense tracking model + """ + if bpe_path is None: + bpe_path = os.path.join( + os.path.dirname(__file__), "..", "assets", "bpe_simple_vocab_16e6.txt.gz" + ) + + # Build Tracker module + tracker = build_tracker(apply_temporal_disambiguation=apply_temporal_disambiguation) + + # Build Detector components + visual_neck = _create_vision_backbone() + text_encoder = _create_text_encoder(bpe_path) + backbone = SAM3VLBackbone(scalp=1, visual=visual_neck, text=text_encoder) + transformer = _create_sam3_transformer(has_presence_token=has_presence_token) + segmentation_head: UniversalSegmentationHead = _create_segmentation_head() + input_geometry_encoder = _create_geometry_encoder() + + # Create main dot product scoring + main_dot_prod_mlp = MLP( + input_dim=256, + hidden_dim=2048, + output_dim=256, + num_layers=2, + dropout=0.1, + residual=True, + out_norm=nn.LayerNorm(256), + ) + main_dot_prod_scoring = DotProductScoring( + d_model=256, d_proj=256, prompt_mlp=main_dot_prod_mlp + ) + + # Build Detector module + detector = Sam3ImageOnVideoMultiGPU( + num_feature_levels=1, + backbone=backbone, + transformer=transformer, + segmentation_head=segmentation_head, + semantic_segmentation_head=None, + input_geometry_encoder=input_geometry_encoder, + use_early_fusion=True, + use_dot_prod_scoring=True, + dot_prod_scoring=main_dot_prod_scoring, + supervise_joint_box_scores=has_presence_token, + ) + + # Build the main SAM3 video model + if apply_temporal_disambiguation: + model = Sam3VideoInferenceWithInstanceInteractivity( + detector=detector, + tracker=tracker, + score_threshold_detection=0.5, + assoc_iou_thresh=0.1, + det_nms_thresh=0.1, + new_det_thresh=0.7, + hotstart_delay=15, + hotstart_unmatch_thresh=8, + hotstart_dup_thresh=8, + suppress_unmatched_only_within_hotstart=True, + min_trk_keep_alive=-1, + max_trk_keep_alive=30, + init_trk_keep_alive=30, + suppress_overlapping_based_on_recent_occlusion_threshold=0.7, + suppress_det_close_to_boundary=False, + fill_hole_area=16, + recondition_every_nth_frame=16, + masklet_confirmation_enable=False, + decrease_trk_keep_alive_for_empty_masklets=False, + image_size=1008, + image_mean=(0.5, 0.5, 0.5), + image_std=(0.5, 0.5, 0.5), + compile_model=compile, + ) + else: + # a version without any heuristics for ablation studies + model = Sam3VideoInferenceWithInstanceInteractivity( + detector=detector, + tracker=tracker, + score_threshold_detection=0.5, + assoc_iou_thresh=0.1, + det_nms_thresh=0.1, + new_det_thresh=0.7, + hotstart_delay=0, + hotstart_unmatch_thresh=0, + hotstart_dup_thresh=0, + suppress_unmatched_only_within_hotstart=True, + min_trk_keep_alive=-1, + max_trk_keep_alive=30, + init_trk_keep_alive=30, + suppress_overlapping_based_on_recent_occlusion_threshold=0.7, + suppress_det_close_to_boundary=False, + fill_hole_area=16, + 
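+            # hotstart and periodic reconditioning are effectively disabled (set to 0)
+            # in this ablation variant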
recondition_every_nth_frame=0, + masklet_confirmation_enable=False, + decrease_trk_keep_alive_for_empty_masklets=False, + image_size=1008, + image_mean=(0.5, 0.5, 0.5), + image_std=(0.5, 0.5, 0.5), + compile_model=compile, + ) + + # Load checkpoint if provided + if load_from_HF and checkpoint_path is None: + checkpoint_path = download_ckpt_from_hf() + if checkpoint_path is not None: + with g_pathmgr.open(checkpoint_path, "rb") as f: + ckpt = torch.load(f, map_location="cpu", weights_only=True) + if "model" in ckpt and isinstance(ckpt["model"], dict): + ckpt = ckpt["model"] + + missing_keys, unexpected_keys = model.load_state_dict( + ckpt, strict=strict_state_dict_loading + ) + if missing_keys: + print(f"Missing keys: {missing_keys}") + if unexpected_keys: + print(f"Unexpected keys: {unexpected_keys}") + + model.to(device=device) + return model + + +def build_sam3_video_predictor(*model_args, gpus_to_use=None, **model_kwargs): + return Sam3VideoPredictorMultiGPU( + *model_args, gpus_to_use=gpus_to_use, **model_kwargs + ) diff --git a/detect_tools/sam3/sam3/perflib/__init__.py b/detect_tools/sam3/sam3/perflib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5c3823b937eb21830e435700c972ea8f6a0efa9b --- /dev/null +++ b/detect_tools/sam3/sam3/perflib/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import os + +is_enabled = False +if os.getenv("USE_PERFLIB", "1") == "1": + # print("Enabled the use of perflib.\n", end="") + is_enabled = True diff --git a/detect_tools/sam3/sam3/perflib/associate_det_trk.py b/detect_tools/sam3/sam3/perflib/associate_det_trk.py new file mode 100644 index 0000000000000000000000000000000000000000..508ae817dcd60d835e3581cf0e7883163af0688e --- /dev/null +++ b/detect_tools/sam3/sam3/perflib/associate_det_trk.py @@ -0,0 +1,137 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +from collections import defaultdict + +import torch +import torch.nn.functional as F +from sam3.perflib.masks_ops import mask_iou +from scipy.optimize import linear_sum_assignment + + +def associate_det_trk( + det_masks, + track_masks, + iou_threshold=0.5, + iou_threshold_trk=0.5, + det_scores=None, + new_det_thresh=0.0, +): + """ + Optimized implementation of detection <-> track association that minimizes DtoH syncs. 
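+    Tracks are matched one-to-one to detections via Hungarian assignment on mask IoU,
+    while a single detection may be matched by several tracks; detections that match no
+    track with IoU >= `iou_threshold` and whose score is at least `new_det_thresh` are
+    reported as new.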
+
+    Args:
+        det_masks: (N, H, W) tensor of predicted masks
+        track_masks: (M, H, W) tensor of track masks
+
+    Returns:
+        new_det_indices: list of indices in det_masks considered 'new'
+        unmatched_trk_indices: list of indices in track_masks considered 'unmatched'
+        det_to_matched_trk: dict mapping each detection index to the track indices it matched
+        matched_det_scores: dict mapping matched track indices to [det_score, det_score * iou]
+    """
+    with torch.autograd.profiler.record_function("perflib: associate_det_trk"):
+        assert isinstance(det_masks, torch.Tensor), "det_masks should be a tensor"
+        assert isinstance(track_masks, torch.Tensor), "track_masks should be a tensor"
+        if det_masks.size(0) == 0 or track_masks.size(0) == 0:
+            return list(range(det_masks.size(0))), [], {}, {}  # all detections are new
+
+        if list(det_masks.shape[-2:]) != list(track_masks.shape[-2:]):
+            # resize to the smaller size to save GPU memory
+            if det_masks.shape[-2:].numel() < track_masks.shape[-2:].numel():
+                track_masks = (
+                    F.interpolate(
+                        track_masks.unsqueeze(1).float(),
+                        size=det_masks.shape[-2:],
+                        mode="bilinear",
+                        align_corners=False,
+                    ).squeeze(1)
+                    > 0
+                )
+            else:
+                # resize detections to track size
+                det_masks = (
+                    F.interpolate(
+                        det_masks.unsqueeze(1).float(),
+                        size=track_masks.shape[-2:],
+                        mode="bilinear",
+                        align_corners=False,
+                    ).squeeze(1)
+                    > 0
+                )
+
+        det_masks = det_masks > 0
+        track_masks = track_masks > 0
+
+        iou = mask_iou(det_masks, track_masks)  # (N, M)
+        igeit = iou >= iou_threshold
+        igeit_any_dim_1 = igeit.any(dim=1)
+        igeit_trk = iou >= iou_threshold_trk
+
+        iou_list = iou.cpu().numpy().tolist()
+        igeit_list = igeit.cpu().numpy().tolist()
+        igeit_any_dim_1_list = igeit_any_dim_1.cpu().numpy().tolist()
+        igeit_trk_list = igeit_trk.cpu().numpy().tolist()
+
+        det_scores_list = (
+            det_scores
+            if det_scores is None
+            else det_scores.cpu().float().numpy().tolist()
+        )
+
+        # Hungarian matching for tracks (one-to-one: each track matches at most one detection)
+        # For detections: allow many tracks to match to the same detection (many-to-one)
+
+        # If either is empty, return all detections as new
+        if det_masks.size(0) == 0 or track_masks.size(0) == 0:
+            return list(range(det_masks.size(0))), [], {}, {}
+
+        # Hungarian matching: maximize IoU for tracks
+        cost_matrix = 1 - iou.cpu().numpy()  # Hungarian solves for minimum cost
+        row_ind, col_ind = linear_sum_assignment(cost_matrix)
+
+        def branchy_hungarian_better_uses_the_cpu(
+            cost_matrix, row_ind, col_ind, iou_list, det_masks, track_masks
+        ):
+            matched_trk = set()
+            matched_det = set()
+            matched_det_scores = {}  # track index -> [det_score, det_score * iou] det score of matched detection mask
+            for d, t in zip(row_ind, col_ind):
+                matched_det_scores[t] = [
+                    det_scores_list[d],
+                    det_scores_list[d] * iou_list[d][t],
+                ]
+                if igeit_trk_list[d][t]:
+                    matched_trk.add(t)
+                    matched_det.add(d)
+
+            # Tracks not matched by Hungarian assignment above threshold are unmatched
+            unmatched_trk_indices = [
+                t for t in range(track_masks.size(0)) if t not in matched_trk
+            ]
+
+            # For detections: allow many tracks to match to the same detection (many-to-one)
+            # So, a detection is 'new' if it does not match any track above threshold
+            assert track_masks.size(0) == igeit.size(
+                1
+            )  # Needed for loop optimization below
+            new_det_indices = []
+            for d in range(det_masks.size(0)):
+                if not igeit_any_dim_1_list[d]:
+                    if det_scores is not None and det_scores[d] >= new_det_thresh:
+                        new_det_indices.append(d)
+
+            # for each detection, which tracks it matched to (above threshold)
+            det_to_matched_trk = defaultdict(list)
+            for d in range(det_masks.size(0)):
+                for t in range(track_masks.size(0)):
+                    if igeit_list[d][t]:
det_to_matched_trk[d].append(t) + + return ( + new_det_indices, + unmatched_trk_indices, + det_to_matched_trk, + matched_det_scores, + ) + + return (branchy_hungarian_better_uses_the_cpu)( + cost_matrix, row_ind, col_ind, iou_list, det_masks, track_masks + ) diff --git a/detect_tools/sam3/sam3/perflib/compile.py b/detect_tools/sam3/sam3/perflib/compile.py new file mode 100644 index 0000000000000000000000000000000000000000..f427aa7b066ca366b3650e331b22072d4e07c4c5 --- /dev/null +++ b/detect_tools/sam3/sam3/perflib/compile.py @@ -0,0 +1,99 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import torch + + +def recursive_fn_factory(fn): + def recursive_fn(b): + if isinstance(b, dict): + return {k: recursive_fn(b[k]) for k in b} + if isinstance(b, list): + return [recursive_fn(t) for t in b] + if isinstance(b, tuple): + return tuple(recursive_fn(t) for t in b) + if isinstance(b, torch.Tensor): + return fn(b) + # Yes, writing out an explicit white list of + # trivial types is tedious, but so are bugs that + # come from not applying fn, when expected to have + # applied it. + if b is None: + return b + trivial_types = [bool, int] + for t in trivial_types: + if isinstance(b, t): + return b + raise TypeError(f"Unexpected type {type(b)}") + + return recursive_fn + + +recursive_contiguous = recursive_fn_factory(lambda x: x.contiguous()) +recursive_clone = recursive_fn_factory(torch.clone) + + +def compile_wrapper( + fn, *, mode="max-autotune", fullgraph=True, dynamic=False, name=None +): + compiled_fn = torch.compile(fn, mode=mode, fullgraph=fullgraph, dynamic=dynamic) + + def compiled_fn_wrapper(*args, **kwargs): + with torch.autograd.profiler.record_function( + f"compiled {fn}" if name is None else name + ): + cont_args = recursive_contiguous(args) + cont_kwargs = recursive_contiguous(kwargs) + result = compiled_fn(*cont_args, **cont_kwargs) + cloned_result = recursive_clone(result) + return cloned_result + + return compiled_fn_wrapper + + +def shape_logging_wrapper(fn, keep_kwargs, enable_logging=False): + """ + Wraps a function and prints the shapes of all tensor inputs. + Only prints when a new combination of shapes is seen. + Thread-safe. 
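# Illustrative usage sketch for compile_wrapper above (hypothetical function name; only
# tensor arguments are shown because recursive_contiguous whitelists tensors, dicts,
# lists, tuples, None, bool and int):
#
#   def double(x: torch.Tensor) -> torch.Tensor:
#       return x * 2
#
#   compiled_double = compile_wrapper(double, name="double")
#   y = compiled_double(torch.randn(4, 4))   # inputs made contiguous, outputs cloned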
+ + Args: + fn: Function to wrap + enable_logging: Boolean flag to enable/disable logging + """ + seen_shapes = set() + + def get_shape(obj): + if isinstance(obj, torch.Tensor): + return obj.shape + elif isinstance(obj, (list, tuple)): + if len(obj) > 1: + return tuple(get_shape(x) for x in obj) + return get_shape(obj[0]) + elif isinstance(obj, dict): + return tuple(sorted((k, get_shape(v)) for k, v in obj.items())) + else: + return type(obj).__name__ + + def wrapper(*args, **kwargs): + shapes = tuple(get_shape(arg) for arg in args) + tuple( + (k, get_shape(v)) + for k, v in kwargs.items() + if isinstance(v, (torch.Tensor, list)) + and (len(keep_kwargs) > 0 and k in keep_kwargs) + ) + if shapes not in seen_shapes: + seen_shapes.add(shapes) + if enable_logging: + print(f"[ShapeLogger] New input shapes for {fn.__qualname__}: {shapes}") + return fn(*args, **kwargs) + + # Allow toggling the flag at runtime + wrapper.enable_logging = enable_logging + + def set_logging(enabled=False): + nonlocal enable_logging + enable_logging = enabled + wrapper.enable_logging = enable_logging + + wrapper.set_logging = set_logging + return wrapper diff --git a/detect_tools/sam3/sam3/perflib/connected_components.py b/detect_tools/sam3/sam3/perflib/connected_components.py new file mode 100644 index 0000000000000000000000000000000000000000..c96932a4ae31092872a0c50b7fec3c53662df354 --- /dev/null +++ b/detect_tools/sam3/sam3/perflib/connected_components.py @@ -0,0 +1,84 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved +import logging + +import torch + +try: + from cc_torch import get_connected_components + + HAS_CC_TORCH = True +except ImportError: + logging.debug( + "cc_torch not found. Consider installing for better performance. Command line:" + " pip install git+https://github.com/ronghanghu/cc_torch.git" + ) + HAS_CC_TORCH = False + + +def connected_components_cpu_single(values: torch.Tensor): + assert values.dim() == 2 + from skimage.measure import label + + labels, num = label(values.cpu().numpy(), return_num=True) + labels = torch.from_numpy(labels) + counts = torch.zeros_like(labels) + for i in range(1, num + 1): + cur_mask = labels == i + cur_count = cur_mask.sum() + counts[cur_mask] = cur_count + return labels, counts + + +def connected_components_cpu(input_tensor: torch.Tensor): + out_shape = input_tensor.shape + if input_tensor.dim() == 4 and input_tensor.shape[1] == 1: + input_tensor = input_tensor.squeeze(1) + else: + assert ( + input_tensor.dim() == 3 + ), "Input tensor must be (B, H, W) or (B, 1, H, W)." + + batch_size = input_tensor.shape[0] + labels_list = [] + counts_list = [] + for b in range(batch_size): + labels, counts = connected_components_cpu_single(input_tensor[b]) + labels_list.append(labels) + counts_list.append(counts) + labels_tensor = torch.stack(labels_list, dim=0).to(input_tensor.device) + counts_tensor = torch.stack(counts_list, dim=0).to(input_tensor.device) + return labels_tensor.view(out_shape), counts_tensor.view(out_shape) + + +def connected_components(input_tensor: torch.Tensor): + """ + Computes connected components labeling on a batch of 2D tensors, using the best available backend. + + Args: + input_tensor (torch.Tensor): A BxHxW integer tensor or Bx1xHxW. Non-zero values are considered foreground. Bool tensor also accepted + + Returns: + Tuple[torch.Tensor, torch.Tensor]: Both tensors have the same shape as input_tensor. + - A tensor with dense labels. Background is 0. + - A tensor with the size of the connected component for each pixel. 
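# Illustrative usage sketch (hypothetical values, for reference only):
#
#   mask = torch.zeros(1, 5, 5, dtype=torch.uint8)
#   mask[0, :2, :2] = 1                      # blob A (4 pixels)
#   mask[0, 3:, 3:] = 1                      # blob B (4 pixels)
#   labels, sizes = connected_components(mask)
#   # off-GPU this takes the CPU fallback; `labels` holds two distinct non-zero ids and
#   # `sizes` is 4 at every pixel of either blob, 0 on the background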
+ """ + if input_tensor.dim() == 3: + input_tensor = input_tensor.unsqueeze(1) + + assert ( + input_tensor.dim() == 4 and input_tensor.shape[1] == 1 + ), "Input tensor must be (B, H, W) or (B, 1, H, W)." + + if input_tensor.is_cuda: + if HAS_CC_TORCH: + return get_connected_components(input_tensor.to(torch.uint8)) + else: + # triton fallback + from sam3.perflib.triton.connected_components import ( + connected_components_triton, + ) + + return connected_components_triton(input_tensor) + + # CPU fallback + return connected_components_cpu(input_tensor) diff --git a/detect_tools/sam3/sam3/perflib/fa3.py b/detect_tools/sam3/sam3/perflib/fa3.py new file mode 100644 index 0000000000000000000000000000000000000000..8f8c9bd544679d01963c8df2594a0184a93f72c8 --- /dev/null +++ b/detect_tools/sam3/sam3/perflib/fa3.py @@ -0,0 +1,27 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import torch + + +@torch.library.custom_op("flash::flash_attn_func", mutates_args=()) +def flash_attn_func_op( + q: torch.Tensor, k: torch.Tensor, v: torch.Tensor +) -> torch.Tensor: + from flash_attn_interface import flash_attn_func as fa3 + + return fa3(q, k, v) + + +def flash_attn_func(q, k, v): + dtype = torch.float8_e4m3fn + return flash_attn_func_op(q.to(dtype), k.to(dtype), v.to(dtype)).to(q.dtype) + + +@flash_attn_func_op.register_fake +def _(q, k, v, **kwargs): + # two outputs: + # 1. output: (batch, seq_len, num_heads, head_dim) + # 2. softmax_lse: (batch, num_heads, seq_len) with dtype=torch.float32 + # output needs to be bfloat16, not float8! + meta_q = torch.empty_like(q, dtype=torch.bfloat16).contiguous() + return meta_q diff --git a/detect_tools/sam3/sam3/perflib/masks_ops.py b/detect_tools/sam3/sam3/perflib/masks_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..48299d585df170ac67cdf5e66d6108668cf6cd97 --- /dev/null +++ b/detect_tools/sam3/sam3/perflib/masks_ops.py @@ -0,0 +1,69 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
All Rights Reserved + +import torch + + +def masks_to_boxes(masks: torch.Tensor, obj_ids: list[int]): + with torch.autograd.profiler.record_function("perflib: masks_to_boxes"): + # Sanity check based on callsite for replacement + assert masks.shape[0] == len(obj_ids) + assert masks.dim() == 3 + + # Based on torchvision masks_to_boxes + if masks.numel() == 0: + return torch.zeros((0, 4), device=masks.device, dtype=torch.float) + + N, H, W = masks.shape + device = masks.device + y = torch.arange(H, device=device).view(1, H) + x = torch.arange(W, device=device).view(1, W) + + masks_with_obj = masks != 0 # N, H, W + masks_with_obj_x = masks_with_obj.amax( + dim=1 + ) # N, H (which columns have objects) + masks_with_obj_y = masks_with_obj.amax(dim=2) # N, W (which rows have objects) + masks_without_obj_x = ~masks_with_obj_x + masks_without_obj_y = ~masks_with_obj_y + + bounding_boxes_0 = torch.amin( + (masks_without_obj_x * W) + (masks_with_obj_x * x), dim=1 + ) + bounding_boxes_1 = torch.amin( + (masks_without_obj_y * H) + (masks_with_obj_y * y), dim=1 + ) + bounding_boxes_2 = torch.amax(masks_with_obj_x * x, dim=1) + bounding_boxes_3 = torch.amax(masks_with_obj_y * y, dim=1) + + bounding_boxes = torch.stack( + [bounding_boxes_0, bounding_boxes_1, bounding_boxes_2, bounding_boxes_3], + dim=1, + ).to(dtype=torch.float) + assert bounding_boxes.shape == (N, 4) + assert bounding_boxes.device == masks.device + assert bounding_boxes.dtype == torch.float + return bounding_boxes + + +def mask_iou(pred_masks: torch.Tensor, gt_masks: torch.Tensor) -> torch.Tensor: + """ + Compute the IoU (Intersection over Union) between predicted masks and ground truth masks. + Args: + - pred_masks: (N, H, W) bool Tensor, containing binary predicted segmentation masks + - gt_masks: (M, H, W) bool Tensor, containing binary ground truth segmentation masks + Returns: + - ious: (N, M) float Tensor, containing IoUs for each pair of predicted and ground truth masks + """ + assert pred_masks.dtype == gt_masks.dtype == torch.bool + N, H, W = pred_masks.shape + M, _, _ = gt_masks.shape + + # Flatten masks: (N, 1, H*W) and (1, M, H*W) + pred_flat = pred_masks.view(N, 1, H * W) + gt_flat = gt_masks.view(1, M, H * W) + + # Compute intersection and union: (N, M) + intersection = (pred_flat & gt_flat).sum(dim=2).float() + union = (pred_flat | gt_flat).sum(dim=2).float() + ious = intersection / union.clamp(min=1) + return ious # shape: (N, M) diff --git a/detect_tools/sam3/sam3/perflib/nms.py b/detect_tools/sam3/sam3/perflib/nms.py new file mode 100644 index 0000000000000000000000000000000000000000..b3efc5995c33005d4ec9f683096eb1a8f63660c5 --- /dev/null +++ b/detect_tools/sam3/sam3/perflib/nms.py @@ -0,0 +1,91 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
All Rights Reserved + +import logging + +import numpy as np +import torch + +from sam3.perflib.masks_ops import mask_iou + + +try: + from torch_generic_nms import generic_nms as generic_nms_cuda + + GENERIC_NMS_AVAILABLE = True +except ImportError: + logging.debug( + "Falling back to triton or CPU mask NMS implementation -- please install `torch_generic_nms` via\n\t" + 'pip uninstall -y torch_generic_nms; TORCH_CUDA_ARCH_LIST="8.0 9.0" pip install git+https://github.com/ronghanghu/torch_generic_nms' + ) + GENERIC_NMS_AVAILABLE = False + + +def nms_masks( + pred_probs: torch.Tensor, + pred_masks: torch.Tensor, + prob_threshold: float, + iou_threshold: float, +) -> torch.Tensor: + """ + Args: + - pred_probs: (num_det,) float Tensor, containing the score (probability) of each detection + - pred_masks: (num_det, H_mask, W_mask) float Tensor, containing the binary segmentation mask of each detection + - prob_threshold: float, score threshold to prefilter detections (NMS is performed on detections above threshold) + - iou_threshold: float, mask IoU threshold for NMS + + Returns: + - keep: (num_det,) bool Tensor, indicating whether each detection is kept after score thresholding + NMS + """ + # prefilter the detections with prob_threshold ("valid" are those above prob_threshold) + is_valid = pred_probs > prob_threshold # (num_det,) + probs = pred_probs[is_valid] # (num_valid,) + masks_binary = pred_masks[is_valid] > 0 # (num_valid, H_mask, W_mask) + if probs.numel() == 0: + return is_valid # no valid detection, return empty keep mask + + ious = mask_iou(masks_binary, masks_binary) # (num_valid, num_valid) + kept_inds = generic_nms(ious, probs, iou_threshold) + + # valid_inds are the indices among `probs` of valid detections before NMS (or -1 for invalid) + valid_inds = torch.where(is_valid, is_valid.cumsum(dim=0) - 1, -1) # (num_det,) + keep = torch.isin(valid_inds, kept_inds) # (num_det,) + return keep + + +def generic_nms( + ious: torch.Tensor, scores: torch.Tensor, iou_threshold=0.5 +) -> torch.Tensor: + """A generic version of `torchvision.ops.nms` that takes a pairwise IoU matrix.""" + + assert ious.dim() == 2 and ious.size(0) == ious.size(1) + assert scores.dim() == 1 and scores.size(0) == ious.size(0) + + if ious.is_cuda: + if GENERIC_NMS_AVAILABLE: + return generic_nms_cuda(ious, scores, iou_threshold, use_iou_matrix=True) + else: + from sam3.perflib.triton.nms import nms_triton + + return nms_triton(ious, scores, iou_threshold) + + return generic_nms_cpu(ious, scores, iou_threshold) + + +def generic_nms_cpu( + ious: torch.Tensor, scores: torch.Tensor, iou_threshold=0.5 +) -> torch.Tensor: + """ + A generic version of `torchvision.ops.nms` that takes a pairwise IoU matrix. 
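# Worked example (hypothetical numbers, for reference only): greedy NMS over a 3x3 IoU
# matrix with threshold 0.5 keeps the top-scoring box, drops its high-IoU neighbour,
# and keeps the remaining low-overlap box:
#
#   scores = torch.tensor([0.9, 0.8, 0.3])
#   ious = torch.tensor([[1.0, 0.7, 0.1],
#                        [0.7, 1.0, 0.2],
#                        [0.1, 0.2, 1.0]])
#   generic_nms(ious, scores, iou_threshold=0.5)   # -> indices [0, 2]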
(CPU implementation + based on https://github.com/jwyang/faster-rcnn.pytorch/blob/master/lib/model/nms/nms_cpu.py) + """ + ious_np = ious.float().detach().cpu().numpy() + scores_np = scores.float().detach().cpu().numpy() + order = scores_np.argsort()[::-1] + kept_inds = [] + while order.size > 0: + i = order.item(0) + kept_inds.append(i) + inds = np.where(ious_np[i, order[1:]] <= iou_threshold)[0] + order = order[inds + 1] + + return torch.tensor(kept_inds, dtype=torch.int64, device=scores.device) diff --git a/detect_tools/sam3/sam3/perflib/tests/tests.py b/detect_tools/sam3/sam3/perflib/tests/tests.py new file mode 100644 index 0000000000000000000000000000000000000000..0fb88ad007951b69405be012f1fb0b242155c6cc --- /dev/null +++ b/detect_tools/sam3/sam3/perflib/tests/tests.py @@ -0,0 +1,59 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import os + +import numpy as np +import pytest +import torch +from PIL import Image +from sam3.perflib.masks_ops import masks_to_boxes + + +class TestMasksToBoxes: + def test_masks_box(self): + def masks_box_check(masks, expected, atol=1e-4): + out = masks_to_boxes(masks, [1 for _ in range(masks.shape[0])]) + assert out.dtype == torch.float + print("out: ", out) + print("expected: ", expected) + torch.testing.assert_close( + out, expected, rtol=0.0, check_dtype=True, atol=atol + ) + + # Check for int type boxes. + def _get_image(): + assets_directory = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "assets" + ) + mask_path = os.path.join(assets_directory, "masks.tiff") + image = Image.open(mask_path) + return image + + def _create_masks(image, masks): + for index in range(image.n_frames): + image.seek(index) + frame = np.array(image) + masks[index] = torch.tensor(frame) + + return masks + + expected = torch.tensor( + [ + [127, 2, 165, 40], + [2, 50, 44, 92], + [56, 63, 98, 100], + [139, 68, 175, 104], + [160, 112, 198, 145], + [49, 138, 99, 182], + [108, 148, 152, 213], + ], + dtype=torch.float, + ) + + image = _get_image() + for dtype in [torch.float16, torch.float32, torch.float64]: + masks = torch.zeros( + (image.n_frames, image.height, image.width), dtype=dtype + ) + masks = _create_masks(image, masks) + masks_box_check(masks, expected) diff --git a/detect_tools/sam3/sam3/perflib/triton/connected_components.py b/detect_tools/sam3/sam3/perflib/triton/connected_components.py new file mode 100644 index 0000000000000000000000000000000000000000..253ca9d54bdf3b47dbff54721a4cfe78aa443f09 --- /dev/null +++ b/detect_tools/sam3/sam3/perflib/triton/connected_components.py @@ -0,0 +1,468 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved +import math + +import torch +import triton +import triton.language as tl + + +@triton.jit +def _any_combine(a, b): + return a | b + + +@triton.jit +def tl_any(a, dim=0): + return tl.reduce(a, dim, _any_combine) + + +# ============================================================================== +# ## Phase 1: Initialization Kernel +# ============================================================================== +# Each foreground pixel (value > 0) gets a unique label equal to its +# linear index. Background pixels (value == 0) get a sentinel label of -1. +# Note that the indexing is done across batch boundaries for simplicity +# (i.e., the first pixel of image 1 gets label H*W, etc.) 
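# Reference sketch in eager PyTorch (hypothetical helper, for comparison only): the
# Triton kernel below computes exactly this initialization.
def _init_labels_reference(x: torch.Tensor) -> torch.Tensor:
    flat = x.reshape(-1)
    idx = torch.arange(flat.numel(), device=flat.device)  # flat index = provisional root
    sentinel = torch.full_like(idx, -1)                    # background marker
    return torch.where(flat != 0, idx, sentinel).view_as(x)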
+ + +@triton.jit +def _init_labels_kernel( + input_ptr, labels_ptr, numel: tl.constexpr, BLOCK_SIZE: tl.constexpr +): + pid = tl.program_id(0) + offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) + mask = offsets < numel + input_values = tl.load(input_ptr + offsets, mask=mask, other=0) + + indices = tl.where((input_values != 0), offsets, -1) + tl.store(labels_ptr + offsets, indices, mask=mask) + + +# ============================================================================== +# ## Phase 2: Local merging +# ============================================================================== +# Each pixel tries to merge with its 8-connected neighbors (up, down, left, right) +# if they have the same value. This is done using a disjoint-set union operation. + + +@triton.jit +def find(labels_ptr, indices, mask): + current_pids = indices + + # 'is_done' tracks lanes that have finished their work. + # A lane is initially "done" if it's not active (mask is False). + is_done = ~mask + + # Loop as long as there is at least one lane that is NOT done. + while tl_any(~is_done): + # The work_mask is for lanes that are still active and seeking their root. + work_mask = ~is_done + parents = tl.load(labels_ptr + current_pids, mask=work_mask, other=-1) + # A lane is now done if its parent is itself (it's a root) + # or if it hits a -1 sentinel (a safe exit condition). + is_root = parents == current_pids + is_sentinel = parents == -1 + is_done |= is_root | is_sentinel + + # For lanes that are not yet done, update their pid to their parent to continue traversal. + current_pids = tl.where(is_done, current_pids, parents) + # We could add the following line to do path compression, but experimentally it's slower + # tl.atomic_min(labels_ptr + indices, current_pids, mask=mask) + return current_pids + + +@triton.jit +def union(labels_ptr, a, b, process_mask): + # This function implements a disjoint-set union + # As an invariant, we use the fact that the roots have the lower id. That helps parallelization + # However, that is not sufficient by itself. Suppose two threads want to do union(0,2) and union(1,2) at the same time + # Then if we do a naive atomic_min, 0 and 1 will compete to be the new parent of 2 and min(0, 1) will win. + # However, 1 still needs to be merged with the new {0, 2} component. + # To ensure that merge is also done, we need to detect whether the merge was successful, and if not retry until it is + + current_a = a + current_b = b + + final_root = a + # A mask to track which lanes have successfully completed their union. + done_mask = ~process_mask # tl.zeros_like(a) == 1 # Init with all False + + while tl_any(~done_mask): + # Define the mask for lanes that still need work in this iteration + work_mask = process_mask & ~done_mask + + # Find the roots for the current a and b values in the active lanes + root_a = find(labels_ptr, current_a, work_mask) + tl.debug_barrier() + root_b = find(labels_ptr, current_b, work_mask) + + # 7. Merge logic + # If roots are already the same, the sets are already merged. Mark as done. + are_equal = root_a == root_b + final_root = tl.where(are_equal & work_mask & ~done_mask, root_a, final_root) + done_mask |= are_equal & work_mask + + # Define masks for the two merge cases (a < b or b < a) + a_is_smaller = root_a < root_b + + # Case 1: root_a < root_b. 
Attempt to set parent[root_b] = root_a + merge_mask_a_smaller = work_mask & a_is_smaller & ~are_equal + ptr_b = labels_ptr + root_b + old_val_b = tl.atomic_min(ptr_b, root_a, mask=merge_mask_a_smaller) + + # A lane is done if its atomic op was successful (old value was what we expected) + success_b = old_val_b == root_b + final_root = tl.where(success_b & work_mask & ~done_mask, root_a, final_root) + done_mask |= success_b & merge_mask_a_smaller + + # *** Crucial Retry Logic *** + # If the update failed (old_val_b != root_b), another thread interfered. + # We update `current_b` to this new root (`old_val_b`) and will retry in the next loop iteration. + current_b = tl.where(success_b | ~merge_mask_a_smaller, current_b, old_val_b) + + # Case 2: root_b < root_a. Attempt to set parent[root_a] = root_b + merge_mask_b_smaller = work_mask & ~a_is_smaller & ~are_equal + ptr_a = labels_ptr + root_a + old_val_a = tl.atomic_min(ptr_a, root_b, mask=merge_mask_b_smaller) + + success_a = old_val_a == root_a + final_root = tl.where(success_a & work_mask & ~done_mask, root_b, final_root) + done_mask |= success_a & merge_mask_b_smaller + + # *** Crucial Retry Logic *** + # Similarly, update `current_a` if the atomic operation failed. + current_a = tl.where(success_a | ~merge_mask_b_smaller, current_a, old_val_a) + + return final_root + + +@triton.jit +def _merge_helper( + input_ptr, + labels_ptr, + base_offset, + offsets_h, + offsets_w, + mask_2d, + valid_current, + current_values, + current_labels, + H, + W, + dx: tl.constexpr, + dy: tl.constexpr, +): + # Helper functions to compute merge with a specific neighbor offset (dx, dy) + + neighbor_h = offsets_h + dy + neighbor_w = offsets_w + dx + # Proper bounds checking: all four bounds must be satisfied + mask_n = ( + mask_2d + & (neighbor_h[:, None] >= 0) + & (neighbor_h[:, None] < H) + & (neighbor_w[None, :] >= 0) + & (neighbor_w[None, :] < W) + ) + + offsets_neighbor = neighbor_h[:, None] * W + neighbor_w[None, :] + neighbor_values = tl.load( + input_ptr + base_offset + offsets_neighbor, mask=mask_n, other=-1 + ) + + mask_n = tl.ravel(mask_n) + neighbor_labels = tl.load( + labels_ptr + tl.ravel(base_offset + offsets_neighbor), mask=mask_n, other=-1 + ) + + to_merge = ( + mask_n & (neighbor_labels != -1) & tl.ravel(current_values == neighbor_values) + ) + valid_write = valid_current & to_merge + + # returns new parents for the pixels that were merged (otherwise keeps current labels) + return tl.where( + valid_write, + union(labels_ptr, current_labels, neighbor_labels, valid_write), + current_labels, + ) + + +@triton.autotune( + configs=[ + triton.Config( + {"BLOCK_SIZE_H": 4, "BLOCK_SIZE_W": 16}, num_stages=1, num_warps=2 + ), + triton.Config( + {"BLOCK_SIZE_H": 4, "BLOCK_SIZE_W": 32}, num_stages=2, num_warps=4 + ), + ], + key=["H", "W"], + restore_value=["labels_ptr"], +) +@triton.jit +def _local_prop_kernel( + labels_ptr, + input_ptr, + H: tl.constexpr, + W: tl.constexpr, + BLOCK_SIZE_H: tl.constexpr, + BLOCK_SIZE_W: tl.constexpr, +): + # This is the meat of the Phase 2 to do local merging + # It will be launched with a 2D grid: + # - dim 0: batch index + # - dim 1: block index over HxW image (2D tiling) + pid_b = tl.program_id(0) + pid_hw = tl.program_id(1) + + # Calculate offsets for the core block + offsets_h = (pid_hw // tl.cdiv(W, BLOCK_SIZE_W)) * BLOCK_SIZE_H + tl.arange( + 0, BLOCK_SIZE_H + ) + offsets_w = (pid_hw % tl.cdiv(W, BLOCK_SIZE_W)) * BLOCK_SIZE_W + tl.arange( + 0, BLOCK_SIZE_W + ) + + base_offset = pid_b * H * W + offsets_2d = 
offsets_h[:, None] * W + offsets_w[None, :] + mask_2d = (offsets_h[:, None] < H) & (offsets_w[None, :] < W) + mask_1d = tl.ravel(mask_2d) + + # Load the current labels for the block - these are parent pointers + current_labels = tl.load( + labels_ptr + tl.ravel(base_offset + offsets_2d), mask=mask_1d, other=-1 + ) + current_values = tl.load( + input_ptr + base_offset + offsets_2d, mask=mask_2d, other=-1 + ) + valid_current = mask_1d & (current_labels != -1) + + # Horizontal merge + current_labels = _merge_helper( + input_ptr, + labels_ptr, + base_offset, + offsets_h, + offsets_w, + mask_2d, + valid_current, + current_values, + current_labels, + H, + W, + -1, + 0, + ) + # Vertical merge + current_labels = _merge_helper( + input_ptr, + labels_ptr, + base_offset, + offsets_h, + offsets_w, + mask_2d, + valid_current, + current_values, + current_labels, + H, + W, + 0, + -1, + ) + # Diagonal merges + current_labels = _merge_helper( + input_ptr, + labels_ptr, + base_offset, + offsets_h, + offsets_w, + mask_2d, + valid_current, + current_values, + current_labels, + H, + W, + -1, + -1, + ) + current_labels = _merge_helper( + input_ptr, + labels_ptr, + base_offset, + offsets_h, + offsets_w, + mask_2d, + valid_current, + current_values, + current_labels, + H, + W, + -1, + 1, + ) + + # This actually does some path compression, in a lightweight but beneficial way + tl.atomic_min( + labels_ptr + tl.ravel(base_offset + offsets_2d), current_labels, mask=mask_1d + ) + + +# ============================================================================== +# ## Phase 3: Pointer Jumping Kernel +# ============================================================================== +# This kernel performs pointer jumping to ensure that all pixels point directly to their root labels. +# This is done in a loop until convergence. + + +@triton.jit +def _pointer_jump_kernel( + labels_in_ptr, labels_out_ptr, numel: tl.constexpr, BLOCK_SIZE: tl.constexpr +): + """ + Pointer jumping kernel with double buffering to avoid race conditions. + Reads from labels_in_ptr and writes to labels_out_ptr. + """ + # This kernel is launched with a 1D grid, and does not care about batching explicitly. + # By construction, the labels are global indices across the batch, and we never perform + # cross-batch merges, so this is safe. + + pid = tl.program_id(0) + offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) + mask = offsets < numel + + # Load current labels from input buffer + current_labels = tl.load(labels_in_ptr + offsets, mask=mask, other=-1) + valid_mask = mask & (current_labels != -1) + + # A mask to track which lanes have successfully completed their union. + done_mask = ~valid_mask + while tl_any(~(done_mask | ~valid_mask)): + parent_labels = tl.load( + labels_in_ptr + current_labels, mask=valid_mask, other=-1 + ) + + are_equal = current_labels == parent_labels + done_mask |= are_equal & valid_mask + + current_labels = tl.where( + ~done_mask, tl.minimum(current_labels, parent_labels), current_labels + ) + + # Write to output buffer (safe because we're not reading from it) + tl.store(labels_out_ptr + offsets, current_labels, mask=mask) + + +# ============================================================================== +# ## Phase 4: Kernels for Computing Component Sizes +# ============================================================================== + + +# Step 4.1: Count occurrences of each root label using atomic adds. 
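# Reference sketch in eager PyTorch (hypothetical helper, for comparison only): steps
# 4.1 and 4.2 below amount to a histogram over root labels followed by a gather.
def _component_sizes_reference(labels: torch.Tensor) -> torch.Tensor:
    flat = labels.reshape(-1)
    valid = flat >= 0                                     # background pixels carry -1
    roots = flat[valid].long()
    hist = torch.bincount(roots, minlength=flat.numel())  # component size per root label
    sizes = torch.zeros_like(flat)
    sizes[valid] = hist[roots].to(sizes.dtype)            # broadcast size back to pixels
    return sizes.view_as(labels)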
+@triton.jit +def _count_labels_kernel(labels_ptr, sizes_ptr, numel, BLOCK_SIZE: tl.constexpr): + pid = tl.program_id(0) + offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) + mask = offsets < numel + + # Load the final, converged labels + labels = tl.load(labels_ptr + offsets, mask=mask, other=-1) + valid_mask = mask & (labels != -1) + + # Atomically increment the counter for each label. This builds a histogram. + tl.atomic_add(sizes_ptr + labels, 1, mask=valid_mask) + + +# Step 4.2: Broadcast the computed sizes back to the output tensor. +@triton.jit +def _broadcast_sizes_kernel( + labels_ptr, sizes_ptr, out_ptr, numel, BLOCK_SIZE: tl.constexpr +): + pid = tl.program_id(0) + offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) + mask = offsets < numel + + # Load the final labels + labels = tl.load(labels_ptr + offsets, mask=mask, other=-1) + valid_mask = mask & (labels != -1) + + # Look up the size for each label from the histogram + component_sizes = tl.load(sizes_ptr + labels, mask=valid_mask, other=0) + + # Write the size to the final output tensor. Background pixels get size 0. + tl.store(out_ptr + offsets, component_sizes, mask=mask) + + +def connected_components_triton(input_tensor: torch.Tensor): + """ + Computes connected components labeling on a batch of 2D integer tensors using Triton. + + Args: + input_tensor (torch.Tensor): A BxHxW integer tensor or Bx1xHxW. Non-zero values are considered foreground. Bool tensor also accepted + + Returns: + Tuple[torch.Tensor, int]: A tuple containing: + - A BxHxW output tensor with dense labels. Background is 0. + - A BxHxW tensor with the size of the connected component for each pixel. + """ + assert ( + input_tensor.is_cuda and input_tensor.is_contiguous() + ), "Input tensor must be a contiguous CUDA tensor." + out_shape = input_tensor.shape + if input_tensor.dim() == 4 and input_tensor.shape[1] == 1: + input_tensor = input_tensor.squeeze(1) + else: + assert ( + input_tensor.dim() == 3 + ), "Input tensor must be (B, H, W) or (B, 1, H, W)." 
+ + B, H, W = input_tensor.shape + numel = B * H * W + device = input_tensor.device + + # --- Allocate Tensors --- + labels = torch.empty_like(input_tensor, dtype=torch.int32) + output = torch.empty_like(input_tensor, dtype=torch.int32) + + # --- Phase 1 --- + BLOCK_SIZE = 256 + grid_init = (triton.cdiv(numel, BLOCK_SIZE),) + _init_labels_kernel[grid_init]( + input_tensor, + labels, + numel, + BLOCK_SIZE=BLOCK_SIZE, + ) + + # --- Phase 2 --- + grid_local_prop = lambda meta: ( + B, + triton.cdiv(H, meta["BLOCK_SIZE_H"]) * triton.cdiv(W, meta["BLOCK_SIZE_W"]), + ) + _local_prop_kernel[grid_local_prop](labels, input_tensor, H, W) + + # --- Phase 3 --- + BLOCK_SIZE = 256 + grid_jump = lambda meta: (triton.cdiv(numel, meta["BLOCK_SIZE"]),) + _pointer_jump_kernel[grid_jump](labels, output, numel, BLOCK_SIZE=BLOCK_SIZE) + + # --- Phase 4 --- + # Allocate tensor to store the final output sizes + component_sizes_out = torch.empty_like(input_tensor, dtype=torch.int32) + + # Allocate a temporary 1D tensor to act as the histogram + # Size is numel because labels can be up to numel-1 + sizes_histogram = torch.zeros(numel, dtype=torch.int32, device=device) + + # 4.1: Count the occurrences of each label + grid_count = (triton.cdiv(numel, BLOCK_SIZE),) + _count_labels_kernel[grid_count]( + output, sizes_histogram, numel, BLOCK_SIZE=BLOCK_SIZE + ) + + # 2.2: Broadcast the counts to the final output tensor + grid_broadcast = (triton.cdiv(numel, BLOCK_SIZE),) + _broadcast_sizes_kernel[grid_broadcast]( + output, sizes_histogram, component_sizes_out, numel, BLOCK_SIZE=BLOCK_SIZE + ) + return output.view(out_shape) + 1, component_sizes_out.view(out_shape) diff --git a/detect_tools/sam3/sam3/perflib/triton/nms.py b/detect_tools/sam3/sam3/perflib/triton/nms.py new file mode 100644 index 0000000000000000000000000000000000000000..ed800a1e9b5c131386d530df9bbe2be8ad670161 --- /dev/null +++ b/detect_tools/sam3/sam3/perflib/triton/nms.py @@ -0,0 +1,124 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +# Adapted from https://github.com/stackav-oss/conch/blob/main/conch/kernels/vision/nms.py + +import torch +import triton +import triton.language as tl + + +@triton.autotune( + configs=[ + triton.Config({"cxpr_block_size": 128}), + triton.Config({"cxpr_block_size": 256}), + triton.Config({"cxpr_block_size": 512}), + triton.Config({"cxpr_block_size": 1024}), + triton.Config({"cxpr_block_size": 2048}), + triton.Config({"cxpr_block_size": 4096}), + triton.Config({"cxpr_block_size": 8192}), + ], + key=["num_boxes"], +) +@triton.jit +def _nms_suppression_kernel( + # Tensors + iou_mask_ptr: tl.tensor, # [N, N] + keep_mask_ptr: tl.tensor, # [N] + # Scalars + num_boxes: tl.int32, + # Strides + iou_mask_stride: tl.int32, + # Constexprs + cxpr_block_size: tl.constexpr, +) -> None: + """NMS suppression kernel. + + Args: + iou_mask_ptr: Pointer to precomputed IoU mask, shape: (N, N). + keep_mask_ptr: Pointer to keep mask tensor, shape: (N,). + num_boxes: Number of boxes. + iou_mask_stride: Stride for IoU mask tensor. + cxpr_block_size: Block size for processing. + """ + # Sequential NMS: for each box in sorted order, suppress later boxes + for current_box_idx in range(num_boxes - 1): + # Check if current box is still kept + is_kept = tl.load(keep_mask_ptr + current_box_idx) + if is_kept: + # IoU mask row offset for the current box + # Because the IoU mask is sorted by score, we will only consider boxes that come after the current box. 
+ # This means we only need to read the upper triangular part of the IoU mask. + iou_row_offset = current_box_idx * iou_mask_stride + + # Only process boxes that come after the current box + next_box_idx = current_box_idx + 1 + remaining_boxes = num_boxes - next_box_idx + + # Iterate blockwise through the columns + for block_idx in range(tl.cdiv(remaining_boxes, cxpr_block_size)): + # Masked load of indices for the target boxes in the current block + block_start = next_box_idx + block_idx * cxpr_block_size + target_box_offsets = block_start + tl.arange(0, cxpr_block_size) + target_box_mask = target_box_offsets < num_boxes + + # Suppress boxes with lower scores that have high IoU + suppression_mask = tl.load( + iou_mask_ptr + iou_row_offset + target_box_offsets, + mask=target_box_mask, + other=False, + ) + suppression_mask = tl.cast(suppression_mask, tl.int1) + + # Conditionally store suppression result for high-IoU boxes + tl.store( + keep_mask_ptr + target_box_offsets, False, mask=suppression_mask + ) + + # Potential race condition: we need to ensure all threads complete the store before the next + # iteration otherwise we may load stale data for whether or not a box has been suppressed. + tl.debug_barrier() + + +def nms_triton( + ious: torch.Tensor, + scores: torch.Tensor, + iou_threshold: float, +) -> torch.Tensor: + """Perform NMS given the iou matrix, the scores and the iou threshold + + Args: + ious: Pairwise IoU tensor of shape (N, N). + scores: Scores tensor of shape (N,). + iou_threshold: IoU threshold for suppression. + + Returns: + Tensor: Indices of kept boxes, sorted by decreasing score. + """ + assert scores.dim() == 1, "Scores must be 1D" + iou_mask = ious > iou_threshold + assert iou_mask.dim() == 2 + assert iou_mask.shape[0] == iou_mask.shape[1] == scores.shape[0] + assert iou_mask.device == scores.device + assert iou_mask.dtype == torch.bool + + num_boxes = scores.size(0) + keep_mask = torch.ones(len(scores), device=scores.device, dtype=torch.bool) + + # Sort boxes by scores in descending order + _, sorted_indices = torch.sort(scores, dim=0, stable=True, descending=True) + iou_mask = iou_mask[sorted_indices][:, sorted_indices].contiguous() + + # For the suppression stage, we need to process sequentially, but we'll still take + # advantage of parallelism by processing in blocks in one program. + stage2_grid = (1,) + _nms_suppression_kernel[stage2_grid]( + # Tensors + iou_mask_ptr=iou_mask, + keep_mask_ptr=keep_mask, + # Scalars + num_boxes=num_boxes, + # Strides + iou_mask_stride=iou_mask.stride(0), + ) + # Extract indices of kept boxes + return sorted_indices[keep_mask] diff --git a/detect_tools/sam3/sam3/sam/__init__.py b/detect_tools/sam3/sam3/sam/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b35da6d52b0366b4ff26fa508c34f5d281e0dfa --- /dev/null +++ b/detect_tools/sam3/sam3/sam/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved +from .mask_decoder import MaskDecoder +from .prompt_encoder import PromptEncoder +from .transformer import TwoWayTransformer diff --git a/detect_tools/sam3/sam3/sam/common.py b/detect_tools/sam3/sam3/sam/common.py new file mode 100644 index 0000000000000000000000000000000000000000..b6d1587d3394377a8e4b3fd80475d49775da7ccb --- /dev/null +++ b/detect_tools/sam3/sam3/sam/common.py @@ -0,0 +1,39 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
All Rights Reserved + +from typing import Type + +import torch +import torch.nn as nn + + +class MLPBlock(nn.Module): + def __init__( + self, + embedding_dim: int, + mlp_dim: int, + act: Type[nn.Module] = nn.GELU, + ) -> None: + super().__init__() + self.lin1 = nn.Linear(embedding_dim, mlp_dim) + self.lin2 = nn.Linear(mlp_dim, embedding_dim) + self.act = act() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.lin2(self.act(self.lin1(x))) + + +# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa +# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa +class LayerNorm2d(nn.Module): + def __init__(self, num_channels: int, eps: float = 1e-6) -> None: + super().__init__() + self.weight = nn.Parameter(torch.ones(num_channels)) + self.bias = nn.Parameter(torch.zeros(num_channels)) + self.eps = eps + + def forward(self, x: torch.Tensor) -> torch.Tensor: + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x diff --git a/detect_tools/sam3/sam3/sam/mask_decoder.py b/detect_tools/sam3/sam3/sam/mask_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..b4ac397bf5f433d58bcafd79f00a4e285f89f140 --- /dev/null +++ b/detect_tools/sam3/sam3/sam/mask_decoder.py @@ -0,0 +1,319 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +from typing import List, Optional, Tuple, Type + +import torch +from torch import nn +from torch.nn import functional as F + +from .common import LayerNorm2d + + +class MaskDecoder(nn.Module): + def __init__( + self, + *, + transformer_dim: int, + transformer: nn.Module, + num_multimask_outputs: int = 3, + activation: Type[nn.Module] = nn.GELU, + iou_head_depth: int = 3, + iou_head_hidden_dim: int = 256, + use_high_res_features: bool = False, + iou_prediction_use_sigmoid=False, + dynamic_multimask_via_stability=False, + dynamic_multimask_stability_delta=0.05, + dynamic_multimask_stability_thresh=0.98, + pred_obj_scores: bool = False, + pred_obj_scores_mlp: bool = False, + use_multimask_token_for_obj_ptr: bool = False, + ) -> None: + """ + Predicts masks given an image and prompt embeddings, using a + transformer architecture. 
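# Side note on LayerNorm2d above (illustrative check, not from the original code): it
# normalizes each spatial position over the channel dim of an NCHW tensor, matching a
# channels-last nn.LayerNorm:
#
#   x = torch.randn(2, 8, 4, 4)
#   ref = F.layer_norm(x.permute(0, 2, 3, 1), (8,)).permute(0, 3, 1, 2)
#   # LayerNorm2d(8)(x) is numerically close to ref at init (weight=1, bias=0);
#   # the tiny gap comes from the differing eps defaults (1e-6 vs 1e-5)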
+ + Arguments: + transformer_dim (int): the channel dimension of the transformer + transformer (nn.Module): the transformer used to predict masks + num_multimask_outputs (int): the number of masks to predict + when disambiguating masks + activation (nn.Module): the type of activation to use when + upscaling masks + iou_head_depth (int): the depth of the MLP used to predict + mask quality + iou_head_hidden_dim (int): the hidden dimension of the MLP + used to predict mask quality + """ + super().__init__() + self.transformer_dim = transformer_dim + self.transformer = transformer + + self.num_multimask_outputs = num_multimask_outputs + + self.iou_token = nn.Embedding(1, transformer_dim) + self.num_mask_tokens = num_multimask_outputs + 1 + self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim) + + self.pred_obj_scores = pred_obj_scores + if self.pred_obj_scores: + self.obj_score_token = nn.Embedding(1, transformer_dim) + self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr + + self.output_upscaling = nn.Sequential( + nn.ConvTranspose2d( + transformer_dim, transformer_dim // 4, kernel_size=2, stride=2 + ), + LayerNorm2d(transformer_dim // 4), + activation(), + nn.ConvTranspose2d( + transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2 + ), + activation(), + ) + self.use_high_res_features = use_high_res_features + if use_high_res_features: + self.conv_s0 = nn.Conv2d( + transformer_dim, transformer_dim // 8, kernel_size=1, stride=1 + ) + self.conv_s1 = nn.Conv2d( + transformer_dim, transformer_dim // 4, kernel_size=1, stride=1 + ) + + self.output_hypernetworks_mlps = nn.ModuleList( + [ + MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) + for i in range(self.num_mask_tokens) + ] + ) + + self.iou_prediction_head = MLP( + transformer_dim, + iou_head_hidden_dim, + self.num_mask_tokens, + iou_head_depth, + sigmoid_output=iou_prediction_use_sigmoid, + ) + if self.pred_obj_scores: + self.pred_obj_score_head = nn.Linear(transformer_dim, 1) + if pred_obj_scores_mlp: + self.pred_obj_score_head = MLP(transformer_dim, transformer_dim, 1, 3) + + # When outputting a single mask, optionally we can dynamically fall back to the best + # multimask output token if the single mask output token gives low stability scores. + self.dynamic_multimask_via_stability = dynamic_multimask_via_stability + self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta + self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh + + def forward( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + multimask_output: bool, + repeat_image: bool, + high_res_features: Optional[List[torch.Tensor]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Predict masks given image and prompt embeddings. + + Arguments: + image_embeddings (torch.Tensor): the embeddings from the image encoder + image_pe (torch.Tensor): positional encoding with the shape of image_embeddings + sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes + dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs + multimask_output (bool): Whether to return multiple masks or a single + mask. 
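# Note on the selection logic below (paraphrase of the code, for reference): with
# num_multimask_outputs=3 the decoder predicts 4 candidate masks; multimask_output=True
# returns masks 1-3, while False returns mask 0 (or, outside training with
# dynamic_multimask_via_stability, falls back to the best-IoU multimask when mask 0 is
# unstable).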
+ + Returns: + torch.Tensor: batched predicted masks + torch.Tensor: batched predictions of mask quality + torch.Tensor: batched SAM token for mask output + """ + masks, iou_pred, mask_tokens_out, object_score_logits = self.predict_masks( + image_embeddings=image_embeddings, + image_pe=image_pe, + sparse_prompt_embeddings=sparse_prompt_embeddings, + dense_prompt_embeddings=dense_prompt_embeddings, + repeat_image=repeat_image, + high_res_features=high_res_features, + ) + + # Select the correct mask or masks for output + if multimask_output: + masks = masks[:, 1:, :, :] + iou_pred = iou_pred[:, 1:] + elif self.dynamic_multimask_via_stability and not self.training: + masks, iou_pred = self._dynamic_multimask_via_stability(masks, iou_pred) + else: + masks = masks[:, 0:1, :, :] + iou_pred = iou_pred[:, 0:1] + + if multimask_output and self.use_multimask_token_for_obj_ptr: + sam_tokens_out = mask_tokens_out[:, 1:] # [b, 3, c] shape + else: + # Take the mask output token. Here we *always* use the token for single mask output. + # At test time, even if we track after 1-click (and using multimask_output=True), + # we still take the single mask token here. The rationale is that we always track + # after multiple clicks during training, so the past tokens seen during training + # are always the single mask token (and we'll let it be the object-memory token). + sam_tokens_out = mask_tokens_out[:, 0:1] # [b, 1, c] shape + + # Prepare output + return masks, iou_pred, sam_tokens_out, object_score_logits + + def predict_masks( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + repeat_image: bool, + high_res_features: Optional[List[torch.Tensor]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Predicts masks. 
See 'forward' for more details.""" + # Concatenate output tokens + s = 0 + if self.pred_obj_scores: + output_tokens = torch.cat( + [ + self.obj_score_token.weight, + self.iou_token.weight, + self.mask_tokens.weight, + ], + dim=0, + ) + s = 1 + else: + output_tokens = torch.cat( + [self.iou_token.weight, self.mask_tokens.weight], dim=0 + ) + output_tokens = output_tokens.unsqueeze(0).expand( + sparse_prompt_embeddings.size(0), -1, -1 + ) + tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1) + + # Expand per-image data in batch direction to be per-mask + if repeat_image: + src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0) + else: + assert image_embeddings.shape[0] == tokens.shape[0] + src = image_embeddings + src = src + dense_prompt_embeddings + assert ( + image_pe.size(0) == 1 + ), "image_pe should have size 1 in batch dim (from `get_dense_pe()`)" + pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0) + b, c, h, w = src.shape + + # Run the transformer + hs, src = self.transformer(src, pos_src, tokens) + iou_token_out = hs[:, s, :] + mask_tokens_out = hs[:, s + 1 : (s + 1 + self.num_mask_tokens), :] + + # Upscale mask embeddings and predict masks using the mask tokens + src = src.transpose(1, 2).view(b, c, h, w) + if not self.use_high_res_features: + upscaled_embedding = self.output_upscaling(src) + else: + dc1, ln1, act1, dc2, act2 = self.output_upscaling + feat_s0, feat_s1 = high_res_features + upscaled_embedding = act1(ln1(dc1(src) + feat_s1)) + upscaled_embedding = act2(dc2(upscaled_embedding) + feat_s0) + + hyper_in_list: List[torch.Tensor] = [] + for i in range(self.num_mask_tokens): + hyper_in_list.append( + self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]) + ) + hyper_in = torch.stack(hyper_in_list, dim=1) + b, c, h, w = upscaled_embedding.shape + masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w) + + # Generate mask quality predictions + iou_pred = self.iou_prediction_head(iou_token_out) + if self.pred_obj_scores: + assert s == 1 + object_score_logits = self.pred_obj_score_head(hs[:, 0, :]) + else: + # Obj scores logits - default to 10.0, i.e. assuming the object is present, sigmoid(10)=1 + object_score_logits = 10.0 * iou_pred.new_ones(iou_pred.shape[0], 1) + + return masks, iou_pred, mask_tokens_out, object_score_logits + + def _get_stability_scores(self, mask_logits): + """ + Compute stability scores of the mask logits based on the IoU between upper and + lower thresholds. + """ + mask_logits = mask_logits.flatten(-2) + stability_delta = self.dynamic_multimask_stability_delta + area_i = torch.sum(mask_logits > stability_delta, dim=-1).float() + area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float() + stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0) + return stability_scores + + def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores): + """ + When outputting a single mask, if the stability score from the current single-mask + output (based on output token 0) falls below a threshold, we instead select from + multi-mask outputs (based on output token 1~3) the mask with the highest predicted + IoU score. This is intended to ensure a valid mask for both clicking and tracking. 
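# The stability score referenced above is computed in _get_stability_scores as
#   stability = |{logits > +delta}| / |{logits > -delta}|
# Worked example (hypothetical numbers): delta=0.05 and logits [0.2, 0.01, -0.3, 1.0]
# give 2/3 ~= 0.67, below the 0.98 threshold, so the best multimask output is used instead.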
+ """ + # The best mask from multimask output tokens (1~3) + multimask_logits = all_mask_logits[:, 1:, :, :] + multimask_iou_scores = all_iou_scores[:, 1:] + best_scores_inds = torch.argmax(multimask_iou_scores, dim=-1) + batch_inds = torch.arange( + multimask_iou_scores.size(0), device=all_iou_scores.device + ) + best_multimask_logits = multimask_logits[batch_inds, best_scores_inds] + best_multimask_logits = best_multimask_logits.unsqueeze(1) + best_multimask_iou_scores = multimask_iou_scores[batch_inds, best_scores_inds] + best_multimask_iou_scores = best_multimask_iou_scores.unsqueeze(1) + + # The mask from singlemask output token 0 and its stability score + singlemask_logits = all_mask_logits[:, 0:1, :, :] + singlemask_iou_scores = all_iou_scores[:, 0:1] + stability_scores = self._get_stability_scores(singlemask_logits) + is_stable = stability_scores >= self.dynamic_multimask_stability_thresh + + # Dynamically fall back to best multimask output upon low stability scores. + mask_logits_out = torch.where( + is_stable[..., None, None].expand_as(singlemask_logits), + singlemask_logits, + best_multimask_logits, + ) + iou_scores_out = torch.where( + is_stable.expand_as(singlemask_iou_scores), + singlemask_iou_scores, + best_multimask_iou_scores, + ) + return mask_logits_out, iou_scores_out + + +# Lightly adapted from +# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa +class MLP(nn.Module): + def __init__( + self, + input_dim: int, + hidden_dim: int, + output_dim: int, + num_layers: int, + sigmoid_output: bool = False, + ) -> None: + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList( + nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]) + ) + self.sigmoid_output = sigmoid_output + + def forward(self, x): + for i, layer in enumerate(self.layers): + x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) + if self.sigmoid_output: + x = F.sigmoid(x) + return x diff --git a/detect_tools/sam3/sam3/sam/prompt_encoder.py b/detect_tools/sam3/sam3/sam/prompt_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..145ea9fff05d2d0ec55007bd8c08662c725d362a --- /dev/null +++ b/detect_tools/sam3/sam3/sam/prompt_encoder.py @@ -0,0 +1,243 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +from typing import Any, Optional, Tuple, Type + +import numpy as np +import torch +from torch import nn + +from .common import LayerNorm2d + + +class PromptEncoder(nn.Module): + def __init__( + self, + embed_dim: int, + image_embedding_size: Tuple[int, int], + input_image_size: Tuple[int, int], + mask_in_chans: int, + activation: Type[nn.Module] = nn.GELU, + ) -> None: + """ + Encodes prompts for input to SAM's mask decoder. + + Arguments: + embed_dim (int): The prompts' embedding dimension + image_embedding_size (tuple(int, int)): The spatial size of the + image embedding, as (H, W). + input_image_size (int): The padded size of the image as input + to the image encoder, as (H, W). + mask_in_chans (int): The number of hidden channels used for + encoding input masks. + activation (nn.Module): The activation to use when encoding + input masks. 
+ """ + super().__init__() + self.embed_dim = embed_dim + self.input_image_size = input_image_size + self.image_embedding_size = image_embedding_size + self.pe_layer = PositionEmbeddingRandom(embed_dim // 2) + + self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners + point_embeddings = [ + nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings) + ] + self.point_embeddings = nn.ModuleList(point_embeddings) + self.not_a_point_embed = nn.Embedding(1, embed_dim) + + self.mask_input_size = ( + 4 * image_embedding_size[0], + 4 * image_embedding_size[1], + ) + self.mask_downscaling = nn.Sequential( + nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans // 4), + activation(), + nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans), + activation(), + nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1), + ) + self.no_mask_embed = nn.Embedding(1, embed_dim) + + def get_dense_pe(self) -> torch.Tensor: + """ + Returns the positional encoding used to encode point prompts, + applied to a dense set of points the shape of the image encoding. + + Returns: + torch.Tensor: Positional encoding with shape + 1x(embed_dim)x(embedding_h)x(embedding_w) + """ + return self.pe_layer(self.image_embedding_size).unsqueeze(0) + + def _embed_points( + self, + points: torch.Tensor, + labels: torch.Tensor, + pad: bool, + ) -> torch.Tensor: + """Embeds point prompts.""" + points = points + 0.5 # Shift to center of pixel + if pad: + padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device) + padding_label = -torch.ones((labels.shape[0], 1), device=labels.device) + points = torch.cat([points, padding_point], dim=1) + labels = torch.cat([labels, padding_label], dim=1) + point_embedding = self.pe_layer.forward_with_coords( + points, self.input_image_size + ) + + point_embedding = torch.where( + (labels == -1).unsqueeze(-1), + torch.zeros_like(point_embedding) + self.not_a_point_embed.weight, + point_embedding, + ) + point_embedding = torch.where( + (labels == 0).unsqueeze(-1), + point_embedding + self.point_embeddings[0].weight, + point_embedding, + ) + point_embedding = torch.where( + (labels == 1).unsqueeze(-1), + point_embedding + self.point_embeddings[1].weight, + point_embedding, + ) + point_embedding = torch.where( + (labels == 2).unsqueeze(-1), + point_embedding + self.point_embeddings[2].weight, + point_embedding, + ) + point_embedding = torch.where( + (labels == 3).unsqueeze(-1), + point_embedding + self.point_embeddings[3].weight, + point_embedding, + ) + return point_embedding + + def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor: + """Embeds box prompts.""" + boxes = boxes + 0.5 # Shift to center of pixel + coords = boxes.reshape(-1, 2, 2) + corner_embedding = self.pe_layer.forward_with_coords( + coords, self.input_image_size + ) + corner_embedding[:, 0, :] += self.point_embeddings[2].weight + corner_embedding[:, 1, :] += self.point_embeddings[3].weight + return corner_embedding + + def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor: + """Embeds mask inputs.""" + mask_embedding = self.mask_downscaling(masks) + return mask_embedding + + def _get_batch_size( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> int: + """ + Gets the batch size of the output given the batch size of the input prompts. 
+ """ + if points is not None: + return points[0].shape[0] + elif boxes is not None: + return boxes.shape[0] + elif masks is not None: + return masks.shape[0] + else: + return 1 + + def _get_device(self) -> torch.device: + return self.point_embeddings[0].weight.device + + def forward( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Embeds different types of prompts, returning both sparse and dense + embeddings. + + Arguments: + points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates + and labels to embed. + boxes (torch.Tensor or none): boxes to embed + masks (torch.Tensor or none): masks to embed + + Returns: + torch.Tensor: sparse embeddings for the points and boxes, with shape + BxNx(embed_dim), where N is determined by the number of input points + and boxes. + torch.Tensor: dense embeddings for the masks, in the shape + Bx(embed_dim)x(embed_H)x(embed_W) + """ + bs = self._get_batch_size(points, boxes, masks) + sparse_embeddings = torch.empty( + (bs, 0, self.embed_dim), device=self._get_device() + ) + if points is not None: + coords, labels = points + point_embeddings = self._embed_points(coords, labels, pad=(boxes is None)) + sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1) + if boxes is not None: + box_embeddings = self._embed_boxes(boxes) + sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1) + + if masks is not None: + dense_embeddings = self._embed_masks(masks) + else: + dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand( + bs, -1, self.image_embedding_size[0], self.image_embedding_size[1] + ) + + return sparse_embeddings, dense_embeddings + + +class PositionEmbeddingRandom(nn.Module): + """ + Positional encoding using random spatial frequencies. + """ + + def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None: + super().__init__() + if scale is None or scale <= 0.0: + scale = 1.0 + self.register_buffer( + "positional_encoding_gaussian_matrix", + scale * torch.randn((2, num_pos_feats)), + ) + + def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor: + """Positionally encode points that are normalized to [0,1].""" + # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape + coords = 2 * coords - 1 + coords = coords @ self.positional_encoding_gaussian_matrix + coords = 2 * np.pi * coords + # outputs d_1 x ... 
x d_n x C shape + return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1) + + def forward(self, size: Tuple[int, int]) -> torch.Tensor: + """Generate positional encoding for a grid of the specified size.""" + h, w = size + device: Any = self.positional_encoding_gaussian_matrix.device + grid = torch.ones((h, w), device=device, dtype=torch.float32) + y_embed = grid.cumsum(dim=0) - 0.5 + x_embed = grid.cumsum(dim=1) - 0.5 + y_embed = y_embed / h + x_embed = x_embed / w + + pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1)) + return pe.permute(2, 0, 1) # C x H x W + + def forward_with_coords( + self, coords_input: torch.Tensor, image_size: Tuple[int, int] + ) -> torch.Tensor: + """Positionally encode points that are not normalized to [0,1].""" + coords = coords_input.clone() + coords[:, :, 0] = coords[:, :, 0] / image_size[1] + coords[:, :, 1] = coords[:, :, 1] / image_size[0] + return self._pe_encoding(coords.to(torch.float)) # B x N x C diff --git a/detect_tools/sam3/sam3/sam/rope.py b/detect_tools/sam3/sam3/sam/rope.py new file mode 100644 index 0000000000000000000000000000000000000000..2db01b66765fca3cc074e883076bedf2a345d1f5 --- /dev/null +++ b/detect_tools/sam3/sam3/sam/rope.py @@ -0,0 +1,161 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +""" +Adapted from: +1. https://github.com/meta-llama/codellama/blob/main/llama/model.py +2. https://github.com/naver-ai/rope-vit +3. https://github.com/lucidrains/rotary-embedding-torch +""" + +from typing import Optional + +import torch +from einops import rearrange, repeat +from torch import broadcast_tensors, nn + + +def init_t_xy(end_x: int, end_y: int, scale: float = 1.0, offset: int = 0, device=None): + t = torch.arange(end_x * end_y, dtype=torch.float32, device=device) + t_x = (t % end_x).float() + t_y = torch.div(t, end_x, rounding_mode="floor").float() + return t_x * scale + offset, t_y * scale + offset + + +def compute_axial_cis( + dim: int, + end_x: int, + end_y: int, + theta: float = 10000.0, + scale_pos: float = 1.0, + offset: int = 0, + device=None, +): + freqs_x = 1.0 / ( + theta ** (torch.arange(0, dim, 4, device=device)[: (dim // 4)].float() / dim) + ) + freqs_y = 1.0 / ( + theta ** (torch.arange(0, dim, 4, device=device)[: (dim // 4)].float() / dim) + ) + + t_x, t_y = init_t_xy(end_x, end_y, scale_pos, offset, device=device) + freqs_x = torch.outer(t_x, freqs_x) + freqs_y = torch.outer(t_y, freqs_y) + freqs_cis_x = torch.polar(torch.ones_like(freqs_x), freqs_x) + freqs_cis_y = torch.polar(torch.ones_like(freqs_y), freqs_y) + return torch.cat([freqs_cis_x, freqs_cis_y], dim=-1) + + +def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor): + ndim = x.ndim + assert 0 <= 1 < ndim + assert freqs_cis.shape == (x.shape[-2], x.shape[-1]) + shape = [d if i >= ndim - 2 else 1 for i, d in enumerate(x.shape)] + return freqs_cis.view(*shape) + + +def apply_rotary_enc( + xq: torch.Tensor, + xk: torch.Tensor, + freqs_cis: torch.Tensor, + repeat_freqs_k: bool = False, +): + xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2)) + xk_ = ( + torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) + if xk.shape[-2] != 0 + else None + ) + freqs_cis = reshape_for_broadcast(freqs_cis, xq_) + xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3) + if xk_ is None: + # no keys to rotate, due to dropout + return xq_out.type_as(xq).to(xq.device), xk + # repeat freqs along seq_len dim to match k seq_len + if repeat_freqs_k: + r = xk_.shape[-2] // xq_.shape[-2] + 
freqs_cis = freqs_cis.repeat(*([1] * (freqs_cis.ndim - 2)), r, 1) + xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3) + return xq_out.type_as(xq).to(xq.device), xk_out.type_as(xk).to(xk.device) + + +def complex_mult(xq_real, xq_imag, freqs_cis_real, freqs_cis_imag): + # Compute the real part of the product + real_part = xq_real * freqs_cis_real - xq_imag * freqs_cis_imag + # Compute the imaginary part of the product + imag_part = xq_real * freqs_cis_imag + xq_imag * freqs_cis_real + # Stack the real and imaginary parts along the last dimension + return torch.stack([real_part, imag_part], dim=-1) + + +def apply_rotary_enc_real( + xq: torch.Tensor, + xk: torch.Tensor, + freqs_cis_real: torch.Tensor, + freqs_cis_imag: torch.Tensor, + repeat_freqs_k: bool = False, +): + assert xk is not None + assert xk.shape[-2] != 0 + + xq_real = xq.float().reshape(*xq.shape[:-1], -1, 2)[..., 0] + xq_imag = xq.float().reshape(*xq.shape[:-1], -1, 2)[..., 1] + xk_real = xk.float().reshape(*xk.shape[:-1], -1, 2)[..., 0] + xk_imag = xk.float().reshape(*xk.shape[:-1], -1, 2)[..., 1] + freqs_cis_real = reshape_for_broadcast(freqs_cis_real, xq_real) + freqs_cis_imag = reshape_for_broadcast(freqs_cis_imag, xq_imag) + xq_out = complex_mult(xq_real, xq_imag, freqs_cis_real, freqs_cis_imag).flatten(3) + if repeat_freqs_k: + r = xk_real.shape[-2] // xq_real.shape[-2] + freqs_cis_real = freqs_cis_real.repeat(*([1] * (freqs_cis_real.ndim - 2)), r, 1) + freqs_cis_imag = freqs_cis_imag.repeat(*([1] * (freqs_cis_imag.ndim - 2)), r, 1) + xk_out = complex_mult(xk_real, xk_imag, freqs_cis_real, freqs_cis_imag).flatten(3) + # xq_out = torch.view_as_real(torch.complex(xq_real, xq_imag) * torch.complex(freqs_cis_real, freqs_cis_imag)).flatten(3) + # xk_out = torch.view_as_real(torch.compelx(xk_real, xk_imag) * torch.complex(freqs_cis_real, freqs_cis_imag)).flatten(3) + return xq_out.type_as(xq).to(xq.device), xk_out.type_as(xk).to(xk.device) + + +# rotary embedding helper functions +def broadcat(tensors, dim=-1): + broadcasted_tensors = broadcast_tensors(*tensors) + return torch.cat(broadcasted_tensors, dim=dim) + + +def rotate_half(x: torch.Tensor): + x = rearrange(x, "... (d r) -> ... d r", r=2) + x1, x2 = x.unbind(dim=-1) + x = torch.stack((-x2, x1), dim=-1) + return rearrange(x, "... d r -> ... (d r)") + + +class VisionRotaryEmbeddingVE(nn.Module): + def __init__( + self, + dim: int, + seq_len: int, + pt_seq_len: Optional[int] = None, + theta: float = 10000.0, + offset: int = 1, # specific to VE + ): + super().__init__() + + freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim)) + scale = 1.0 + if pt_seq_len is not None: + scale = pt_seq_len / seq_len + + # offset of +1 following VE - even though for the + # attention op only differences matter + t = torch.arange(seq_len) * scale + offset + + freqs = torch.einsum("..., f -> ... f", t, freqs) + freqs = repeat(freqs, "... n -> ... 
(n r)", r=2) + + freqs = broadcat((freqs[None, :, :], freqs[:, None, :]), dim=-1) + freqs_cos = freqs.cos().view(-1, freqs.shape[-1]) + freqs_sin = freqs.sin().view(-1, freqs.shape[-1]) + + self.register_buffer("freqs_cos", freqs_cos) + self.register_buffer("freqs_sin", freqs_sin) + + def forward(self, t: torch.Tensor): + return t * self.freqs_cos + rotate_half(t) * self.freqs_sin diff --git a/detect_tools/sam3/sam3/sam/transformer.py b/detect_tools/sam3/sam3/sam/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..3e96c28331bfa6726f3bec6dc216137c9a92a30c --- /dev/null +++ b/detect_tools/sam3/sam3/sam/transformer.py @@ -0,0 +1,358 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import math +from functools import partial +from typing import Tuple, Type + +import torch +import torch.nn.functional as F + +from sam3.sam.rope import apply_rotary_enc, apply_rotary_enc_real, compute_axial_cis +from torch import nn, Tensor + +from .common import MLPBlock + + +class TwoWayTransformer(nn.Module): + def __init__( + self, + depth: int, + embedding_dim: int, + num_heads: int, + mlp_dim: int, + activation: Type[nn.Module] = nn.ReLU, + attention_downsample_rate: int = 2, + ) -> None: + """ + A transformer decoder that attends to an input image using + queries whose positional embedding is supplied. + + Args: + depth (int): number of layers in the transformer + embedding_dim (int): the channel dimension for the input embeddings + num_heads (int): the number of heads for multihead attention. Must + divide embedding_dim + mlp_dim (int): the channel dimension internal to the MLP block + activation (nn.Module): the activation to use in the MLP block + """ + super().__init__() + self.depth = depth + self.embedding_dim = embedding_dim + self.num_heads = num_heads + self.mlp_dim = mlp_dim + self.layers = nn.ModuleList() + + for i in range(depth): + self.layers.append( + TwoWayAttentionBlock( + embedding_dim=embedding_dim, + num_heads=num_heads, + mlp_dim=mlp_dim, + activation=activation, + attention_downsample_rate=attention_downsample_rate, + skip_first_layer_pe=(i == 0), + ) + ) + + self.final_attn_token_to_image = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + self.norm_final_attn = nn.LayerNorm(embedding_dim) + + def forward( + self, + image_embedding: Tensor, + image_pe: Tensor, + point_embedding: Tensor, + ) -> Tuple[Tensor, Tensor]: + """ + Args: + image_embedding (torch.Tensor): image to attend to. Should be shape + B x embedding_dim x h x w for any h and w. + image_pe (torch.Tensor): the positional encoding to add to the image. Must + have the same shape as image_embedding. + point_embedding (torch.Tensor): the embedding to add to the query points. + Must have shape B x N_points x embedding_dim for any N_points. 
+ + Returns: + torch.Tensor: the processed point_embedding + torch.Tensor: the processed image_embedding + """ + # BxCxHxW -> BxHWxC == B x N_image_tokens x C + bs, c, h, w = image_embedding.shape + image_embedding = image_embedding.flatten(2).permute(0, 2, 1) + image_pe = image_pe.flatten(2).permute(0, 2, 1) + + # Prepare queries + queries = point_embedding + keys = image_embedding + + # Apply transformer blocks and final layernorm + for layer in self.layers: + queries, keys = layer( + queries=queries, + keys=keys, + query_pe=point_embedding, + key_pe=image_pe, + ) + + # Apply the final attention layer from the points to the image + q = queries + point_embedding + k = keys + image_pe + attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys) + queries = queries + attn_out + queries = self.norm_final_attn(queries) + + return queries, keys + + +class TwoWayAttentionBlock(nn.Module): + def __init__( + self, + embedding_dim: int, + num_heads: int, + mlp_dim: int = 2048, + activation: Type[nn.Module] = nn.ReLU, + attention_downsample_rate: int = 2, + skip_first_layer_pe: bool = False, + ) -> None: + """ + A transformer block with four layers: (1) self-attention of sparse + inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp + block on sparse inputs, and (4) cross attention of dense inputs to sparse + inputs. + + Arguments: + embedding_dim (int): the channel dimension of the embeddings + num_heads (int): the number of heads in the attention layers + mlp_dim (int): the hidden dimension of the mlp block + activation (nn.Module): the activation of the mlp block + skip_first_layer_pe (bool): skip the PE on the first layer + """ + super().__init__() + self.self_attn = Attention(embedding_dim, num_heads) + self.norm1 = nn.LayerNorm(embedding_dim) + + self.cross_attn_token_to_image = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + self.norm2 = nn.LayerNorm(embedding_dim) + + self.mlp = MLPBlock(embedding_dim, mlp_dim, activation) + self.norm3 = nn.LayerNorm(embedding_dim) + + self.norm4 = nn.LayerNorm(embedding_dim) + self.cross_attn_image_to_token = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + + self.skip_first_layer_pe = skip_first_layer_pe + + def forward( + self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor + ) -> Tuple[Tensor, Tensor]: + # Self attention block + if self.skip_first_layer_pe: + queries = self.self_attn(q=queries, k=queries, v=queries) + else: + q = queries + query_pe + attn_out = self.self_attn(q=q, k=q, v=queries) + queries = queries + attn_out + queries = self.norm1(queries) + + # Cross attention block, tokens attending to image embedding + q = queries + query_pe + k = keys + key_pe + attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys) + queries = queries + attn_out + queries = self.norm2(queries) + + # MLP block + mlp_out = self.mlp(queries) + queries = queries + mlp_out + queries = self.norm3(queries) + + # Cross attention block, image embedding attending to tokens + q = queries + query_pe + k = keys + key_pe + attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries) + keys = keys + attn_out + keys = self.norm4(keys) + + return queries, keys + + +class Attention(nn.Module): + """ + An attention layer that allows for downscaling the size of the embedding + after projection to queries, keys, and values. 
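+    Internally, queries, keys, and values are projected down to
+    embedding_dim // downsample_rate channels and split across num_heads heads
+    before scaled dot-product attention is applied.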
+ """ + + def __init__( + self, + embedding_dim: int, + num_heads: int, + downsample_rate: int = 1, + dropout: float = 0.0, + kv_in_dim: int = None, + use_fa3: bool = False, + ) -> None: + super().__init__() + self.embedding_dim = embedding_dim + self.kv_in_dim = kv_in_dim if kv_in_dim is not None else embedding_dim + self.internal_dim = embedding_dim // downsample_rate + self.num_heads = num_heads + self.use_fa3 = use_fa3 + assert ( + self.internal_dim % num_heads == 0 + ), "num_heads must divide embedding_dim." + + self.q_proj = nn.Linear(embedding_dim, self.internal_dim) + self.k_proj = nn.Linear(self.kv_in_dim, self.internal_dim) + self.v_proj = nn.Linear(self.kv_in_dim, self.internal_dim) + self.out_proj = nn.Linear(self.internal_dim, embedding_dim) + + self.dropout_p = dropout + + def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor: + b, n, c = x.shape + x = x.reshape(b, n, num_heads, c // num_heads) + return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head + + def _recombine_heads(self, x: Tensor) -> Tensor: + b, n_heads, n_tokens, c_per_head = x.shape + x = x.transpose(1, 2) + return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C + + def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor: + # Input projections + q = self.q_proj(q) + k = self.k_proj(k) + v = self.v_proj(v) + + # Separate into heads + q = self._separate_heads(q, self.num_heads) + k = self._separate_heads(k, self.num_heads) + v = self._separate_heads(v, self.num_heads) + + dropout_p = self.dropout_p if self.training else 0.0 + # Attention + # with torch.backends.cuda.sdp_kernel( + # enable_flash=USE_FLASH_ATTN, + # # if Flash attention kernel is off, then math kernel needs to be enabled + # enable_math=(OLD_GPU and dropout_p > 0.0) or MATH_KERNEL_ON, + # enable_mem_efficient=OLD_GPU, + # ): + # Let's trust the dispatcher.... 
+ if self.use_fa3: + from sam3.perflib.fa3 import flash_attn_func + + assert dropout_p == 0.0 + out = flash_attn_func( + q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2) + ).transpose(1, 2) + else: + torch.backends.cuda.enable_flash_sdp(True) + torch.backends.cuda.enable_math_sdp(True) + torch.backends.cuda.enable_mem_efficient_sdp(True) + out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p) + + out = self._recombine_heads(out) + out = self.out_proj(out) + + return out + + +class RoPEAttention(Attention): + """Attention with rotary position encoding.""" + + def __init__( + self, + *args, + rope_theta=10000.0, + # whether to repeat q rope to match k length + # this is needed for cross-attention to memories + rope_k_repeat=False, + feat_sizes=(64, 64), # [w, h] for stride 16 feats at 1024 resolution + use_rope_real=False, + **kwargs, + ): + super().__init__(*args, **kwargs) + self.use_rope_real = use_rope_real + self.compute_cis = partial( + compute_axial_cis, dim=self.internal_dim // self.num_heads, theta=rope_theta + ) + device = torch.device("cuda") if torch.cuda.is_available() else None + self.freqs_cis = self.compute_cis( + end_x=feat_sizes[0], end_y=feat_sizes[1], device=device + ) + if self.use_rope_real: + self.freqs_cis_real = self.freqs_cis.real + self.freqs_cis_imag = self.freqs_cis.imag + self.rope_k_repeat = rope_k_repeat + + def forward( + self, q: Tensor, k: Tensor, v: Tensor, num_k_exclude_rope: int = 0 + ) -> Tensor: + # Input projections + q = self.q_proj(q) + k = self.k_proj(k) + v = self.v_proj(v) + + # Separate into heads + q = self._separate_heads(q, self.num_heads) + k = self._separate_heads(k, self.num_heads) + v = self._separate_heads(v, self.num_heads) + + # Apply rotary position encoding + w = h = math.sqrt(q.shape[-2]) + if self.freqs_cis.shape[0] != q.shape[-2]: + self.freqs_cis = self.compute_cis(end_x=w, end_y=h, device=q.device) + self.freqs_cis_real = self.freqs_cis.real + self.freqs_cis_imag = self.freqs_cis.imag + if q.shape[-2] != k.shape[-2]: + assert self.rope_k_repeat + + num_k_rope = k.size(-2) - num_k_exclude_rope + if self.use_rope_real: + q, k[:, :, :num_k_rope] = apply_rotary_enc_real( + q, + k[:, :, :num_k_rope], + freqs_cis_real=self.freqs_cis_real, + freqs_cis_imag=self.freqs_cis_imag, + repeat_freqs_k=self.rope_k_repeat, + ) + else: + q, k[:, :, :num_k_rope] = apply_rotary_enc( + q, + k[:, :, :num_k_rope], + self.freqs_cis, + repeat_freqs_k=self.rope_k_repeat, + ) + + dropout_p = self.dropout_p if self.training else 0.0 + # Attention + # with torch.backends.cuda.sdp_kernel( + # enable_flash=USE_FLASH_ATTN, + # # if Flash attention kernel is off, then math kernel needs to be enabled + # enable_math=(OLD_GPU and dropout_p > 0.0) or MATH_KERNEL_ON, + # enable_mem_efficient=OLD_GPU, + # ): + # Let's trust the dispatcher.... 
+ if self.use_fa3: + from sam3.perflib.fa3 import flash_attn_func + + assert dropout_p == 0.0 + out = flash_attn_func( + q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2) + ).transpose(1, 2) + else: + torch.backends.cuda.enable_flash_sdp(True) + torch.backends.cuda.enable_math_sdp(True) + torch.backends.cuda.enable_mem_efficient_sdp(True) + out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p) + + out = self._recombine_heads(out) + out = self.out_proj(out) + + return out diff --git a/detect_tools/sam3/sam3/train/__init__.py b/detect_tools/sam3/sam3/train/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..46d37d2aaef52ec7de01999516a2b8c3e1fa4986 --- /dev/null +++ b/detect_tools/sam3/sam3/train/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved diff --git a/detect_tools/sam3/sam3/train/configs/eval_base.yaml b/detect_tools/sam3/sam3/train/configs/eval_base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..20890d86459218b95340ab6fd7f931f689e1a100 --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/eval_base.yaml @@ -0,0 +1,279 @@ +# @package _global_ +defaults: + - _self_ + +# This config is the base configuration for all evaluations. Amongst other things, it defines: +# - the model +# - the image transforms +# - the post processors +# - cluster configuration (only relevant for slurm-based evals, ignored otherwise) +# +# Most of the parameters should be kept as-is. The main modifications you may want to make are: +# - the cluster configuration, to adjust partitions/qos to your system +# - the flag gather_pred_via_filesys if you ram is tight +# - num_val_workers if your number of cores is small (should be roughly number of cores / number of gpus) +# - the paths below + + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +paths: + # If you leave the checkpoint path to null, the model will be downloaded from hugging-face. Otherwise provide a path + checkpoint_path: null + # the experiments will be subfolders of this + base_experiment_log_dir: + + # base path to the annotation folder for gold (refer to the readmes on how to download) + base_annotation_path: + + # base path to the annotation folder for silver (refer to the readmes on how to download) + base_annotation_path_silver: + + # path to the metaclip images, used for SA-Co gold (refer to the readme for instructions). Can be null if you don't intend on evaluating on this dataset. + metaclip_img_path: + + # path to the sa1b images, used for SA-Co gold (refer to the readme for instructions). Can be null if you don't intend on evaluating on this dataset. 
+ sa1b_img_path: + + # path to the SA-Co/silver images + silver_img_path: + + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + + use_presence_eval: True + + base_val_transform: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + ######## transforms for validation (begin) ######## + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} # originally `resolution: 1024` + max_size: + _target_: sam3.train.transforms.basic.get_random_resize_max_size + size: ${scratch.resolution} # originally `resolution: 1024` + square: true + consistent_transform: False + ######## transforms for validation (end) ######## + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + + loss: null + + # Model parameters + d_model: 256 + input_box_embedding_dim: ${add:${scratch.d_model},2} + + # Box processing + original_box_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessImage + max_dets_per_img: -1 # infinite detections + use_original_ids: true + use_original_sizes_box: true + use_presence: ${scratch.use_presence_eval} + + box_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessImage + max_dets_per_img: -1 #infinite detections + use_original_ids: false + use_original_sizes_box: false + use_presence: ${scratch.use_presence_eval} + + box_postprocessor_thresholded: + _target_: sam3.eval.postprocessors.PostProcessImage + max_dets_per_img: -1 #infinite detections + use_original_ids: false + use_original_sizes_box: false + detection_threshold: 0.3 + use_presence: ${scratch.use_presence_eval} + + mask_postprocessor_thresholded: + _target_: sam3.eval.postprocessors.PostProcessImage + max_dets_per_img: -1 #infinite detections + iou_type: "segm" + use_original_ids: false + use_original_sizes_box: false + use_original_sizes_mask: true + convert_mask_to_rle: True + detection_threshold: 0.3 + use_presence: ${scratch.use_presence_eval} + + # Image processing parameters + resolution: 1008 + max_ann_per_img: 200 + + # Normalization parameters + train_norm_mean: [0.5, 0.5, 0.5] + train_norm_std: [0.5, 0.5, 0.5] + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + # Training parameters + train_batch_size: 1 + val_batch_size: 1 + num_train_workers: 0 + num_val_workers: 10 # change this depending on the number of cpu cores available + max_data_epochs: 20 + target_epoch_size: 1500 + hybrid_repeats: 1 + context_length: 2 + + # All reduce - this controls how the predictions are sent back to node 0. + # If you have a lot of ram, CPU gather is faster. Otherwise, we provide a fallback through filesystem (eg NFS) + # Switch to true if you get cpu ooms during gather. 
+ gather_pred_via_filesys: false + + # Learning rate and scheduler parameters (unused for eval) + lr_scale: 0.1 + lr_transformer: ${times:8e-4,${scratch.lr_scale}} + lr_vision_backbone: ${times:2.5e-4,${scratch.lr_scale}} + lr_language_backbone: ${times:5e-5,${scratch.lr_scale}} + lrd_vision_backbone: 0.9 # (lower for in-domain adn higher for ood) + wd: 0.1 + scheduler_timescale: 20 + scheduler_warmup: 20 + scheduler_cooldown: 20 + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + skip_first_val: True + max_epochs: ${scratch.max_data_epochs} + accelerator: cuda + seed_value: 123 + val_epoch_freq: 10 + mode: val + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + all: + _target_: sam3.train.loss.sam3_loss.DummyLoss + default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + train: null + val: null + + model: + _target_: sam3.model_builder.build_sam3_image_model + bpe_path: ${paths.bpe_path} + device: cpus + eval_mode: true + enable_segmentation: true # Warning: Enable this if using segmentation. + checkpoint_path: ${paths.checkpoint_path} + + meters: + val: null + + optim: + amp: + enabled: True + amp_dtype: bfloat16 + + optimizer: + _target_: torch.optim.AdamW + + gradient_clip: + _target_: sam3.train.optim.optimizer.GradientClipper + max_norm: 0.1 + norm_type: 2 + + param_group_modifiers: + - _target_: sam3.train.optim.optimizer.layer_decay_param_modifier + _partial_: True + layer_decay_value: ${scratch.lrd_vision_backbone} + apply_to: 'backbone.vision_backbone.trunk' + overrides: + - pattern: '*pos_embed*' + value: 1.0 + + options: + lr: + - scheduler: # transformer and class_embed + _target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler + base_lr: ${scratch.lr_transformer} + timescale: ${scratch.scheduler_timescale} + warmup_steps: ${scratch.scheduler_warmup} + cooldown_steps: ${scratch.scheduler_cooldown} + - scheduler: + _target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler + base_lr: ${scratch.lr_vision_backbone} + timescale: ${scratch.scheduler_timescale} + warmup_steps: ${scratch.scheduler_warmup} + cooldown_steps: ${scratch.scheduler_cooldown} + param_names: + - 'backbone.vision_backbone.*' + - scheduler: + _target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler + base_lr: ${scratch.lr_language_backbone} + timescale: ${scratch.scheduler_timescale} + warmup_steps: ${scratch.scheduler_warmup} + cooldown_steps: ${scratch.scheduler_cooldown} + param_names: + - 'backbone.language_backbone.*' + + weight_decay: + - scheduler: + _target_: fvcore.common.param_scheduler.ConstantParamScheduler + value: ${scratch.wd} + - scheduler: + _target_: fvcore.common.param_scheduler.ConstantParamScheduler + value: 0.0 + param_names: + - '*bias*' + module_cls_names: ['torch.nn.LayerNorm'] + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. 
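Note: the ${times:...} and ${add:...} interpolations used above are not built-in OmegaConf resolvers; they look like custom resolvers registered by the sam3 training code. As a minimal, hedged sketch (the registration point inside sam3.train is an assumption, only the resolver names come from this config), they would behave roughly like this:

    from omegaconf import OmegaConf

    # Hypothetical registration -- the real one lives somewhere in sam3.train (assumption).
    OmegaConf.register_new_resolver("times", lambda a, b: float(a) * float(b))
    OmegaConf.register_new_resolver("add", lambda a, b: int(a) + int(b))

    cfg = OmegaConf.create({
        "scratch": {
            "lr_scale": 0.1,
            "lr_transformer": "${times:8e-4,${scratch.lr_scale}}",        # -> 8e-05
            "d_model": 256,
            "input_box_embedding_dim": "${add:${scratch.d_model},2}",     # -> 258
        }
    })
    print(cfg.scratch.lr_transformer, cfg.scratch.input_box_embedding_dim)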
+ + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/ + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 4 + gpus_per_node: 8 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + + +submitit: + account: null # Add your SLURM account if use_cluster == 1 + partition: null + qos: null # Add your QoS if use_cluster == 1 + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null diff --git a/detect_tools/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_attributes.yaml b/detect_tools/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_attributes.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8646b691734e1dd191d53e700d9b7dcb2c23de72 --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_attributes.yaml @@ -0,0 +1,66 @@ +# @package _global_ +defaults: + - /configs/eval_base.yaml + - _self_ + +# ============================================================================ +# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct +# ============================================================================ +paths: + experiment_log_dir: ${paths.base_experiment_log_dir}/gold_attributes/ + coco_gt: ${paths.base_annotation_path}/gold_attributes_merged_a_release_test.json + coco_gts: + - ${paths.base_annotation_path}/gold_attributes_merged_a_release_test.json + - ${paths.base_annotation_path}/gold_attributes_merged_b_release_test.json + - ${paths.base_annotation_path}/gold_attributes_merged_c_release_test.json + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP + _partial_: true + img_folder: ${paths.metaclip_img_path} + ann_file: ${paths.coco_gt} + transforms: ${scratch.base_val_transform} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: gold_attributes + + meters: + val: + gold_attributes: # this key matches the "dict_key" in the dataloader's collate function + cgf1: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "segm" + dump_dir: ${launcher.experiment_log_dir}/dumps/gold_attributes + merge_predictions: True + postprocessor: ${scratch.mask_postprocessor_thresholded} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 1000000 # no limit + pred_file_evaluators: + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gts} + iou_type: "bbox" + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + 
gt_path: ${paths.coco_gts} + iou_type: "segm" diff --git a/detect_tools/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_crowded.yaml b/detect_tools/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_crowded.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fef74a6ee56c901c258a8ced2beff773a38ec545 --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_crowded.yaml @@ -0,0 +1,66 @@ +# @package _global_ +defaults: + - /configs/eval_base.yaml + - _self_ + +# ============================================================================ +# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct +# ============================================================================ +paths: + experiment_log_dir: ${paths.base_experiment_log_dir}/gold_crowded/ + coco_gt: ${paths.base_annotation_path}/gold_crowded_merged_a_release_test.json + coco_gts: + - ${paths.base_annotation_path}/gold_crowded_merged_a_release_test.json + - ${paths.base_annotation_path}/gold_crowded_merged_b_release_test.json + - ${paths.base_annotation_path}/gold_crowded_merged_c_release_test.json + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP + _partial_: true + img_folder: ${paths.metaclip_img_path} + ann_file: ${paths.coco_gt} + transforms: ${scratch.base_val_transform} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: gold_crowded + + meters: + val: + gold_crowded: # this key matches the "dict_key" in the dataloader's collate function + cgf1: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "segm" + dump_dir: ${launcher.experiment_log_dir}/dumps/gold_crowded + merge_predictions: True + postprocessor: ${scratch.mask_postprocessor_thresholded} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 1000000 # no limit + pred_file_evaluators: + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gts} + iou_type: "bbox" + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gts} + iou_type: "segm" diff --git a/detect_tools/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_fg_food.yaml b/detect_tools/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_fg_food.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b08c4a46921db2123f2540a63536140ea641320e --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_fg_food.yaml @@ -0,0 +1,66 @@ +# @package _global_ +defaults: + - /configs/eval_base.yaml + - _self_ + +# ============================================================================ +# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct +# ============================================================================ +paths: + experiment_log_dir: 
${paths.base_experiment_log_dir}/gold_fg_food/ + coco_gt: ${paths.base_annotation_path}/gold_fg_food_merged_a_release_test.json + coco_gts: + - ${paths.base_annotation_path}/gold_fg_food_merged_a_release_test.json + - ${paths.base_annotation_path}/gold_fg_food_merged_b_release_test.json + - ${paths.base_annotation_path}/gold_fg_food_merged_c_release_test.json + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP + _partial_: true + img_folder: ${paths.metaclip_img_path} + ann_file: ${paths.coco_gt} + transforms: ${scratch.base_val_transform} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: gold_fg_food + + meters: + val: + gold_fg_food: # this key matches the "dict_key" in the dataloader's collate function + cgf1: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "segm" + dump_dir: ${launcher.experiment_log_dir}/dumps/gold_fg_food + merge_predictions: True + postprocessor: ${scratch.mask_postprocessor_thresholded} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 1000000 # no limit + pred_file_evaluators: + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gts} + iou_type: "bbox" + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gts} + iou_type: "segm" diff --git a/detect_tools/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_fg_sports.yaml b/detect_tools/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_fg_sports.yaml new file mode 100644 index 0000000000000000000000000000000000000000..89a93be2acf36ab0f84481dfded86340da97b9a6 --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_fg_sports.yaml @@ -0,0 +1,66 @@ +# @package _global_ +defaults: + - /configs/eval_base.yaml + - _self_ + +# ============================================================================ +# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct +# ============================================================================ +paths: + experiment_log_dir: ${paths.base_experiment_log_dir}/gold_fg_sports_equipment/ + coco_gt: ${paths.base_annotation_path}/gold_fg_sports_equipment_merged_a_release_test.json + coco_gts: + - ${paths.base_annotation_path}/gold_fg_sports_equipment_merged_a_release_test.json + - ${paths.base_annotation_path}/gold_fg_sports_equipment_merged_b_release_test.json + - ${paths.base_annotation_path}/gold_fg_sports_equipment_merged_c_release_test.json + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: 
sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP + _partial_: true + img_folder: ${paths.metaclip_img_path} + ann_file: ${paths.coco_gt} + transforms: ${scratch.base_val_transform} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: gold_fg_sports_equipment + + meters: + val: + gold_fg_sports_equipment: # this key matches the "dict_key" in the dataloader's collate function + cgf1: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "segm" + dump_dir: ${launcher.experiment_log_dir}/dumps/gold_fg_sports_equipment + merge_predictions: True + postprocessor: ${scratch.mask_postprocessor_thresholded} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 1000000 # no limit + pred_file_evaluators: + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gts} + iou_type: "bbox" + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gts} + iou_type: "segm" diff --git a/detect_tools/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_metaclip_nps.yaml b/detect_tools/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_metaclip_nps.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e9c276f4299d4a53d5b44cea5194918541a0d25d --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_metaclip_nps.yaml @@ -0,0 +1,66 @@ +# @package _global_ +defaults: + - /configs/eval_base.yaml + - _self_ + +# ============================================================================ +# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct +# ============================================================================ +paths: + experiment_log_dir: ${paths.base_experiment_log_dir}/gold_metaclip_nps/ + coco_gt: ${paths.base_annotation_path}/gold_metaclip_merged_a_release_test.json + coco_gts: + - ${paths.base_annotation_path}/gold_metaclip_merged_a_release_test.json + - ${paths.base_annotation_path}/gold_metaclip_merged_b_release_test.json + - ${paths.base_annotation_path}/gold_metaclip_merged_c_release_test.json + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP + _partial_: true + img_folder: ${paths.metaclip_img_path} + ann_file: ${paths.coco_gt} + transforms: ${scratch.base_val_transform} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: gold_metaclip_nps + + meters: + val: + gold_metaclip_nps: # this key matches the "dict_key" in the dataloader's collate function + cgf1: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "segm" + dump_dir: 
${launcher.experiment_log_dir}/dumps/gold_metaclip_nps + merge_predictions: True + postprocessor: ${scratch.mask_postprocessor_thresholded} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 1000000 # no limit + pred_file_evaluators: + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gts} + iou_type: "bbox" + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gts} + iou_type: "segm" diff --git a/detect_tools/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_sa1b_nps.yaml b/detect_tools/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_sa1b_nps.yaml new file mode 100644 index 0000000000000000000000000000000000000000..52c87ee30545d24502160e7e8e3a565ce8d83bf2 --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_sa1b_nps.yaml @@ -0,0 +1,66 @@ +# @package _global_ +defaults: + - /configs/eval_base.yaml + - _self_ + +# ============================================================================ +# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct +# ============================================================================ +paths: + experiment_log_dir: ${paths.base_experiment_log_dir}/gold_sa1b_nps/ + coco_gt: ${paths.base_annotation_path}/gold_sa1b_merged_a_release_test.json + coco_gts: + - ${paths.base_annotation_path}/gold_sa1b_merged_a_release_test.json + - ${paths.base_annotation_path}/gold_sa1b_merged_b_release_test.json + - ${paths.base_annotation_path}/gold_sa1b_merged_c_release_test.json + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP + _partial_: true + img_folder: ${paths.sa1b_img_path} + ann_file: ${paths.coco_gt} + transforms: ${scratch.base_val_transform} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: gold_sa1b_nps + + meters: + val: + gold_sa1b_nps: # this key matches the "dict_key" in the dataloader's collate function + cgf1: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "segm" + dump_dir: ${launcher.experiment_log_dir}/dumps/gold_sa1b_nps + merge_predictions: True + postprocessor: ${scratch.mask_postprocessor_thresholded} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 1000000 # no limit + pred_file_evaluators: + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gts} + iou_type: "bbox" + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gts} + iou_type: "segm" diff --git a/detect_tools/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_wiki_common.yaml b/detect_tools/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_wiki_common.yaml new file mode 100644 index 0000000000000000000000000000000000000000..630495423c3840f0e795ee3c501ee5f5b44a3505 --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_wiki_common.yaml 
@@ -0,0 +1,66 @@ +# @package _global_ +defaults: + - /configs/eval_base.yaml + - _self_ + +# ============================================================================ +# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct +# ============================================================================ +paths: + experiment_log_dir: ${paths.base_experiment_log_dir}/gold_wiki_common/ + coco_gt: ${paths.base_annotation_path}/gold_wiki_common_merged_a_release_test.json + coco_gts: + - ${paths.base_annotation_path}/gold_wiki_common_merged_a_release_test.json + - ${paths.base_annotation_path}/gold_wiki_common_merged_b_release_test.json + - ${paths.base_annotation_path}/gold_wiki_common_merged_c_release_test.json + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP + _partial_: true + img_folder: ${paths.metaclip_img_path} + ann_file: ${paths.coco_gt} + transforms: ${scratch.base_val_transform} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: gold_wiki_common + + meters: + val: + gold_wiki_common: # this key matches the "dict_key" in the dataloader's collate function + cgf1: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "segm" + dump_dir: ${launcher.experiment_log_dir}/dumps/gold_wiki_common + merge_predictions: True + postprocessor: ${scratch.mask_postprocessor_thresholded} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 1000000 # no limit + pred_file_evaluators: + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gts} + iou_type: "bbox" + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gts} + iou_type: "segm" diff --git a/detect_tools/sam3/sam3/train/configs/odinw13/odinw_text_and_visual.yaml b/detect_tools/sam3/sam3/train/configs/odinw13/odinw_text_and_visual.yaml new file mode 100644 index 0000000000000000000000000000000000000000..51e93b457c471c861fc6a3a4fd65ced25119aca1 --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/odinw13/odinw_text_and_visual.yaml @@ -0,0 +1,255 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +# python sam3/train/train.py -c configs/odinw_text_only.yaml --use-cluster 1 --partition ${PARTITION} --account ${ACCOUNT} --qos ${QoS} + +paths: + odinw_data_root: + experiment_log_dir: + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + +supercategory_tuple: ${all_odinw_supercategories.${string:${submitit.job_array.task_index}}} +# Validation transforms pipeline +val_transforms: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: 
${scratch.resolution} + max_size: + _target_: sam3.train.transforms.basic.get_random_resize_max_size + size: ${scratch.resolution} + square: true + consistent_transform: False + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + - _target_: sam3.train.transforms.filter_query_transforms.TextQueryToVisual + keep_text_queries: true # Note: set this to false if you only want visual + probability: 1.0 # always + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + enable_segmentation: True + # Box processing + use_presence_eval: True + original_box_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessImage + max_dets_per_img: -1 # infinite detections + use_original_ids: true + use_original_sizes_box: true + use_presence: ${scratch.use_presence_eval} + + # Image processing parameters + resolution: 1008 + # Normalization parameters + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + # Training parameters + val_batch_size: 2 + num_val_workers: 0 + gather_pred_via_filesys: false + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + max_epochs: 1 + accelerator: cuda + seed_value: 123 + mode: val + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.COCO_FROM_JSON + prompts: ${odinw35_prompts.${supercategory_tuple.name}} + include_negatives: true + category_chunk_size: 20 # Note: Since we are doing AP +ve we need to include all categories! + _partial_: true + img_folder: ${paths.odinw_data_root}/${supercategory_tuple.val.img_folder} + ann_file: + _target_: sam3.eval.coco_reindex.reindex_coco_to_temp + input_json_path: ${paths.odinw_data_root}/${supercategory_tuple.val.json} + transforms: ${val_transforms} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: 1 + dict_key: odinw35 + + model: + _target_: sam3.model_builder.build_sam3_image_model + bpe_path: ${paths.bpe_path} + device: cpus + eval_mode: true # Set to false if training + enable_segmentation: ${scratch.enable_segmentation} # Warning: Enable this if using segmentation. 
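Note: this config is meant to be launched as a 13-way submitit job array (see job_array.num_tasks: 13 further down); the interpolation ${all_odinw_supercategories.${string:${submitit.job_array.task_index}}} selects one ODinW13 dataset per array task. Conceptually, the selection amounts to the following plain-Python sketch (the list is abbreviated and the logic is illustrative, not the real launcher code):

    all_odinw_supercategories = [
        {"name": "AerialMaritimeDrone_large",
         "val": {"img_folder": "AerialMaritimeDrone/large/test/",
                 "json": "AerialMaritimeDrone/large/test/annotations_without_background.json"}},
        {"name": "Aquarium",
         "val": {"img_folder": "Aquarium/Aquarium Combined.v2-raw-1024.coco/test/",
                 "json": "Aquarium/Aquarium Combined.v2-raw-1024.coco/test/annotations_without_background.json"}},
        # ... 11 more entries, one per ODinW13 dataset ...
    ]

    task_index = 0  # submitit sets this per array task, 0..12
    supercategory_tuple = all_odinw_supercategories[task_index]
    print(supercategory_tuple["name"])  # the dataset this worker evaluates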
+ + meters: + val: + odinw35: + detection: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "bbox" + dump_dir: ${launcher.experiment_log_dir}/dumps/roboflow/${supercategory_tuple.name} + merge_predictions: True + postprocessor: ${scratch.original_box_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 100 + pred_file_evaluators: + - _target_: sam3.eval.coco_eval_offline.CocoEvaluatorOfflineWithPredFileEvaluators + gt_path: + _target_: sam3.eval.coco_reindex.reindex_coco_to_temp + input_json_path: ${paths.odinw_data_root}/${supercategory_tuple.val.json} + tide: False + iou_type: "bbox" + positive_split: true + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. + + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/${supercategory_tuple.name} + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 1 + gpus_per_node: 2 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null + + job_array: + num_tasks: 13 + task_index: 0 + +# ============================================================================ +# ODinW13 Supercategories +# ============================================================================ + +all_odinw_supercategories: + - name: AerialMaritimeDrone_large + val: + img_folder: AerialMaritimeDrone/large/test/ + json: AerialMaritimeDrone/large/test/annotations_without_background.json + - name: Aquarium + val: + img_folder: Aquarium/Aquarium Combined.v2-raw-1024.coco/test/ + json: Aquarium/Aquarium Combined.v2-raw-1024.coco/test/annotations_without_background.json + - name: CottontailRabbits + val: + img_folder: CottontailRabbits/test/ + json: CottontailRabbits/test/annotations_without_background.json + - name: EgoHands_generic + val: + img_folder: EgoHands/generic/test/ + json: EgoHands/generic/test/annotations_without_background.json + - name: NorthAmericaMushrooms + val: + img_folder: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/test/ + json: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/test/annotations_without_background.json + - name: Packages + val: + img_folder: Packages/Raw/test/ + json: Packages/Raw/test/annotations_without_background.json + - name: PascalVOC + val: + img_folder: PascalVOC/valid/ + json: PascalVOC/valid/annotations_without_background.json + - name: Raccoon + val: + img_folder: Raccoon/Raccoon.v2-raw.coco/test/ + json: Raccoon/Raccoon.v2-raw.coco/test/annotations_without_background.json + - name: ShellfishOpenImages + val: + img_folder: ShellfishOpenImages/raw/test/ + json: ShellfishOpenImages/raw/test/annotations_without_background.json + - name: VehiclesOpenImages + val: + img_folder: VehiclesOpenImages/416x416/test/ + json: VehiclesOpenImages/416x416/test/annotations_without_background.json + - name: pistols + val: + img_folder: pistols/export/ + json: pistols/export/test_annotations_without_background.json + - name: pothole + 
val: + img_folder: pothole/test/ + json: pothole/test/annotations_without_background.json + - name: thermalDogsAndPeople + val: + img_folder: thermalDogsAndPeople/test/ + json: thermalDogsAndPeople/test/annotations_without_background.json + + +odinw35_prompts: + AerialMaritimeDrone_large: '[{"id": 1, "name": "boat", "supercategory": "movable-objects"}, + {"id": 2, "name": "car", "supercategory": "movable-objects"}, {"id": 3, "name": "dock", + "supercategory": "movable-objects"}, {"id": 4, "name": "jet ski", "supercategory": "movable-objects"}, + {"id": 5, "name": "boat lift", "supercategory": "movable-objects"}]' + Aquarium: null + CottontailRabbits: null + EgoHands_generic: null + NorthAmericaMushrooms: '[{''id'': 1, ''name'': + ''chicken of the woods'', ''supercategory'': ''mushroom''}, {''id'': 2, ''name'': ''chanterelle'', ''supercategory'': ''mushroom''}]' + Packages: null + PascalVOC: null + Raccoon: null + ShellfishOpenImages: null + VehiclesOpenImages: null + pistols: null + pothole: null + thermalDogsAndPeople: null diff --git a/detect_tools/sam3/sam3/train/configs/odinw13/odinw_text_only.yaml b/detect_tools/sam3/sam3/train/configs/odinw13/odinw_text_only.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e28fa5df5308a550fccb14f66de4f67a928aff8f --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/odinw13/odinw_text_only.yaml @@ -0,0 +1,253 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +# python sam3/train/train.py -c configs/odinw_text_only.yaml --use-cluster 1 --partition ${PARTITION} --account ${ACCOUNT} --qos ${QoS} + +paths: + odinw_data_root: + experiment_log_dir: + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + + +supercategory_tuple: ${all_odinw_supercategories.${string:${submitit.job_array.task_index}}} +# Validation transforms pipeline +val_transforms: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} + max_size: + _target_: sam3.train.transforms.basic.get_random_resize_max_size + size: ${scratch.resolution} + square: true + consistent_transform: False + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + enable_segmentation: True + # Box processing + use_presence_eval: True + original_box_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessImage + max_dets_per_img: -1 # infinite detections + use_original_ids: true + use_original_sizes_box: true + use_presence: ${scratch.use_presence_eval} + + # Image processing parameters + resolution: 1008 + # Normalization parameters + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + # Training parameters + val_batch_size: 2 + num_val_workers: 0 + gather_pred_via_filesys: false + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + 
_target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + max_epochs: 1 + accelerator: cuda + seed_value: 123 + mode: val + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.COCO_FROM_JSON + prompts: ${odinw35_prompts.${supercategory_tuple.name}} + include_negatives: true + category_chunk_size: 20 # Note: Since we are doing AP +ve we need to include all categories! + _partial_: true + img_folder: ${paths.odinw_data_root}/${supercategory_tuple.val.img_folder} + ann_file: + _target_: sam3.eval.coco_reindex.reindex_coco_to_temp + input_json_path: ${paths.odinw_data_root}/${supercategory_tuple.val.json} + transforms: ${val_transforms} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: 1 + dict_key: odinw35 + + model: + _target_: sam3.model_builder.build_sam3_image_model + bpe_path: ${paths.bpe_path} + device: cpus + eval_mode: true # Set to false if training + enable_segmentation: ${scratch.enable_segmentation} # Warning: Enable this if using segmentation. + + meters: + val: + odinw35: + detection: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "bbox" + dump_dir: ${launcher.experiment_log_dir}/dumps/odinw/${supercategory_tuple.name} + merge_predictions: True + postprocessor: ${scratch.original_box_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 100 + pred_file_evaluators: + - _target_: sam3.eval.coco_eval_offline.CocoEvaluatorOfflineWithPredFileEvaluators + gt_path: + _target_: sam3.eval.coco_reindex.reindex_coco_to_temp + input_json_path: ${paths.odinw_data_root}/${supercategory_tuple.val.json} + tide: False + iou_type: "bbox" + positive_split: False + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. 
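Note: the PredictionDumper above writes COCO-format detections into dump_dir, and CocoEvaluatorOfflineWithPredFileEvaluators then scores them against the reindexed ground-truth JSON. With plain pycocotools, an equivalent bbox-AP computation would look roughly like this (file names below are placeholders, not the dumper's actual output names):

    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval

    coco_gt = COCO("annotations_without_background.json")   # reindexed GT json
    coco_dt = coco_gt.loadRes("dumps/odinw/AerialMaritimeDrone_large/detections.json")

    coco_eval = COCOeval(coco_gt, coco_dt, iouType="bbox")
    coco_eval.params.maxDets = [1, 10, 100]                  # matches maxdets: 100 above
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()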
+ + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/${supercategory_tuple.name} + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 1 + gpus_per_node: 2 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null + + job_array: + num_tasks: 13 + task_index: 0 + +# ============================================================================ +# ODinW13 Supercategories +# ============================================================================ + +all_odinw_supercategories: + - name: AerialMaritimeDrone_large + val: + img_folder: AerialMaritimeDrone/large/test/ + json: AerialMaritimeDrone/large/test/annotations_without_background.json + - name: Aquarium + val: + img_folder: Aquarium/Aquarium Combined.v2-raw-1024.coco/test/ + json: Aquarium/Aquarium Combined.v2-raw-1024.coco/test/annotations_without_background.json + - name: CottontailRabbits + val: + img_folder: CottontailRabbits/test/ + json: CottontailRabbits/test/annotations_without_background.json + - name: EgoHands_generic + val: + img_folder: EgoHands/generic/test/ + json: EgoHands/generic/test/annotations_without_background.json + - name: NorthAmericaMushrooms + val: + img_folder: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/test/ + json: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/test/annotations_without_background.json + - name: Packages + val: + img_folder: Packages/Raw/test/ + json: Packages/Raw/test/annotations_without_background.json + - name: PascalVOC + val: + img_folder: PascalVOC/valid/ + json: PascalVOC/valid/annotations_without_background.json + - name: Raccoon + val: + img_folder: Raccoon/Raccoon.v2-raw.coco/test/ + json: Raccoon/Raccoon.v2-raw.coco/test/annotations_without_background.json + - name: ShellfishOpenImages + val: + img_folder: ShellfishOpenImages/raw/test/ + json: ShellfishOpenImages/raw/test/annotations_without_background.json + - name: VehiclesOpenImages + val: + img_folder: VehiclesOpenImages/416x416/test/ + json: VehiclesOpenImages/416x416/test/annotations_without_background.json + - name: pistols + val: + img_folder: pistols/export/ + json: pistols/export/test_annotations_without_background.json + - name: pothole + val: + img_folder: pothole/test/ + json: pothole/test/annotations_without_background.json + - name: thermalDogsAndPeople + val: + img_folder: thermalDogsAndPeople/test/ + json: thermalDogsAndPeople/test/annotations_without_background.json + + +odinw35_prompts: + AerialMaritimeDrone_large: '[{"id": 1, "name": "boat", "supercategory": "movable-objects"}, + {"id": 2, "name": "car", "supercategory": "movable-objects"}, {"id": 3, "name": "dock", + "supercategory": "movable-objects"}, {"id": 4, "name": "jet ski", "supercategory": "movable-objects"}, + {"id": 5, "name": "boat lift", "supercategory": "movable-objects"}]' + Aquarium: null + CottontailRabbits: null + EgoHands_generic: null + NorthAmericaMushrooms: '[{''id'': 1, ''name'': + ''chicken of the woods'', ''supercategory'': 
''mushroom''}, {''id'': 2, ''name'': ''chanterelle'', ''supercategory'': ''mushroom''}]' + Packages: null + PascalVOC: null + Raccoon: null + ShellfishOpenImages: null + VehiclesOpenImages: null + pistols: null + pothole: null + thermalDogsAndPeople: null diff --git a/detect_tools/sam3/sam3/train/configs/odinw13/odinw_text_only_positive.yaml b/detect_tools/sam3/sam3/train/configs/odinw13/odinw_text_only_positive.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9a86a5230935a1a5dc5b15c14066bb811f3a21de --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/odinw13/odinw_text_only_positive.yaml @@ -0,0 +1,253 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +# python sam3/train/train.py -c configs/odinw_text_only.yaml --use-cluster 1 --partition ${PARTITION} --account ${ACCOUNT} --qos ${QoS} + +paths: + odinw_data_root: + experiment_log_dir: + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + + +supercategory_tuple: ${all_odinw_supercategories.${string:${submitit.job_array.task_index}}} +# Validation transforms pipeline +val_transforms: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} + max_size: + _target_: sam3.train.transforms.basic.get_random_resize_max_size + size: ${scratch.resolution} + square: true + consistent_transform: False + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + enable_segmentation: True + # Box processing + use_presence_eval: True + original_box_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessImage + max_dets_per_img: -1 # infinite detections + use_original_ids: true + use_original_sizes_box: true + use_presence: ${scratch.use_presence_eval} + + # Image processing parameters + resolution: 1008 + # Normalization parameters + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + # Training parameters + val_batch_size: 2 + num_val_workers: 0 + gather_pred_via_filesys: false + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + max_epochs: 1 + accelerator: cuda + seed_value: 123 + mode: val + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.COCO_FROM_JSON + prompts: ${odinw35_prompts.${supercategory_tuple.name}} + include_negatives: true + category_chunk_size: 20 # Note: Since we are doing AP +ve we need to include all categories! 
+ _partial_: true + img_folder: ${paths.odinw_data_root}/${supercategory_tuple.val.img_folder} + ann_file: + _target_: sam3.eval.coco_reindex.reindex_coco_to_temp + input_json_path: ${paths.odinw_data_root}/${supercategory_tuple.val.json} + transforms: ${val_transforms} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: 1 + dict_key: odinw35 + + model: + _target_: sam3.model_builder.build_sam3_image_model + bpe_path: ${paths.bpe_path} + device: cpus + eval_mode: true # Set to false if training + enable_segmentation: ${scratch.enable_segmentation} # Warning: Enable this if using segmentation. + + meters: + val: + odinw35: + detection: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "bbox" + dump_dir: ${launcher.experiment_log_dir}/dumps/roboflow/${supercategory_tuple.name} + merge_predictions: True + postprocessor: ${scratch.original_box_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 100 + pred_file_evaluators: + - _target_: sam3.eval.coco_eval_offline.CocoEvaluatorOfflineWithPredFileEvaluators + gt_path: + _target_: sam3.eval.coco_reindex.reindex_coco_to_temp + input_json_path: ${paths.odinw_data_root}/${supercategory_tuple.val.json} + tide: False + iou_type: "bbox" + positive_split: true + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. + + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/${supercategory_tuple.name} + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 1 + gpus_per_node: 2 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null + + job_array: + num_tasks: 13 + task_index: 0 + +# ============================================================================ +# ODinW13 Supercategories +# ============================================================================ + +all_odinw_supercategories: + - name: AerialMaritimeDrone_large + val: + img_folder: AerialMaritimeDrone/large/test/ + json: AerialMaritimeDrone/large/test/annotations_without_background.json + - name: Aquarium + val: + img_folder: Aquarium/Aquarium Combined.v2-raw-1024.coco/test/ + json: Aquarium/Aquarium Combined.v2-raw-1024.coco/test/annotations_without_background.json + - name: CottontailRabbits + val: + img_folder: CottontailRabbits/test/ + json: CottontailRabbits/test/annotations_without_background.json + - name: EgoHands_generic + val: + img_folder: EgoHands/generic/test/ + json: EgoHands/generic/test/annotations_without_background.json + - name: NorthAmericaMushrooms + val: + img_folder: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/test/ + json: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/test/annotations_without_background.json 
+ - name: Packages + val: + img_folder: Packages/Raw/test/ + json: Packages/Raw/test/annotations_without_background.json + - name: PascalVOC + val: + img_folder: PascalVOC/valid/ + json: PascalVOC/valid/annotations_without_background.json + - name: Raccoon + val: + img_folder: Raccoon/Raccoon.v2-raw.coco/test/ + json: Raccoon/Raccoon.v2-raw.coco/test/annotations_without_background.json + - name: ShellfishOpenImages + val: + img_folder: ShellfishOpenImages/raw/test/ + json: ShellfishOpenImages/raw/test/annotations_without_background.json + - name: VehiclesOpenImages + val: + img_folder: VehiclesOpenImages/416x416/test/ + json: VehiclesOpenImages/416x416/test/annotations_without_background.json + - name: pistols + val: + img_folder: pistols/export/ + json: pistols/export/test_annotations_without_background.json + - name: pothole + val: + img_folder: pothole/test/ + json: pothole/test/annotations_without_background.json + - name: thermalDogsAndPeople + val: + img_folder: thermalDogsAndPeople/test/ + json: thermalDogsAndPeople/test/annotations_without_background.json + + +odinw35_prompts: + AerialMaritimeDrone_large: '[{"id": 1, "name": "boat", "supercategory": "movable-objects"}, + {"id": 2, "name": "car", "supercategory": "movable-objects"}, {"id": 3, "name": "dock", + "supercategory": "movable-objects"}, {"id": 4, "name": "jet ski", "supercategory": "movable-objects"}, + {"id": 5, "name": "boat lift", "supercategory": "movable-objects"}]' + Aquarium: null + CottontailRabbits: null + EgoHands_generic: null + NorthAmericaMushrooms: '[{''id'': 1, ''name'': + ''chicken of the woods'', ''supercategory'': ''mushroom''}, {''id'': 2, ''name'': ''chanterelle'', ''supercategory'': ''mushroom''}]' + Packages: null + PascalVOC: null + Raccoon: null + ShellfishOpenImages: null + VehiclesOpenImages: null + pistols: null + pothole: null + thermalDogsAndPeople: null diff --git a/detect_tools/sam3/sam3/train/configs/odinw13/odinw_text_only_train.yaml b/detect_tools/sam3/sam3/train/configs/odinw13/odinw_text_only_train.yaml new file mode 100644 index 0000000000000000000000000000000000000000..eb03cdf908df88e95c74742e68cb7f243db1ebe1 --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/odinw13/odinw_text_only_train.yaml @@ -0,0 +1,591 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +# python sam3/train/train.py -c configs/odinw_text_only.yaml --use-cluster 1 --partition ${PARTITION} --account ${ACCOUNT} --qos ${QoS} + +paths: + odinw_data_root: + experiment_log_dir: + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + + +odinw_train: + train_file: fewshot_train_shot10_seed300 + num_images: null + supercategory_tuple: ${all_odinw_supercategories.${string:${submitit.job_array.task_index}}} + # Training transforms pipeline + train_transforms: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries + query_filter: + _target_: sam3.train.transforms.filter_query_transforms.FilterCrowds + - _target_: sam3.train.transforms.point_sampling.RandomizeInputBbox + box_noise_std: 0.1 + box_noise_max: 20 + - _target_: sam3.train.transforms.segmentation.DecodeRle + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: + _target_: 
sam3.train.transforms.basic.get_random_resize_scales + size: ${scratch.resolution} + min_size: 480 + rounded: false + max_size: + _target_: sam3.train.transforms.basic.get_random_resize_max_size + size: ${scratch.resolution} + square: true + consistent_transform: ${scratch.consistent_transform} + - _target_: sam3.train.transforms.basic_for_api.PadToSizeAPI + size: ${scratch.resolution} + consistent_transform: ${scratch.consistent_transform} + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries + query_filter: + _target_: sam3.train.transforms.filter_query_transforms.FilterEmptyTargets + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.train_norm_mean} + std: ${scratch.train_norm_std} + - _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries + query_filter: + _target_: sam3.train.transforms.filter_query_transforms.FilterEmptyTargets + - _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries + query_filter: + _target_: sam3.train.transforms.filter_query_transforms.FilterFindQueriesWithTooManyOut + max_num_objects: ${scratch.max_ann_per_img} + + # Validation transforms pipeline + val_transforms: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} + max_size: + _target_: sam3.train.transforms.basic.get_random_resize_max_size + size: ${scratch.resolution} + square: true + consistent_transform: False + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + + # loss config (no mask loss) + loss: + _target_: sam3.train.loss.sam3_loss.Sam3LossWrapper + matcher: ${scratch.matcher} + o2m_weight: 2.0 + o2m_matcher: + _target_: sam3.train.matcher.BinaryOneToManyMatcher + alpha: 0.3 + threshold: 0.4 + topk: 4 + use_o2m_matcher_on_o2m_aux: ${scratch.use_o2m_matcher_on_o2m_aux} + loss_fns_find: + - _target_: sam3.train.loss.loss_fns.Boxes + weight_dict: + loss_bbox: 5.0 + loss_giou: 2.0 + - _target_: sam3.train.loss.loss_fns.IABCEMdetr + weak_loss: False + weight_dict: + loss_ce: ${scratch.loss_ce_weight} # Change + presence_loss: ${scratch.presence_weight} # Change + pos_weight: ${scratch.iabce_pos_weight} + alpha: ${scratch.iabce_alpha} + gamma: 2 + use_presence: True # Change + pos_focal: ${scratch.iabce_pos_focal} + pad_n_queries: ${scratch.num_queries} + pad_scale_pos: ${scratch.instance_query_loss_pad_scale_pos} + + loss_fn_semantic_seg: null + scale_by_find_batch_size: ${scratch.scale_by_find_batch_size} + + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + enable_segmentation: False + use_act_checkpoint_geo_encoder: True + input_geometry_encoder: + _target_: sam3.model.geometry_encoders.SequenceGeometryEncoder + pos_enc: ${scratch.pos_embed} + encode_boxes_as_points: False + points_direct_project: True + points_pool: True + points_pos_enc: True + boxes_direct_project: True + boxes_pool: True + boxes_pos_enc: True + d_model: ${scratch.d_model} + num_layers: 3 + use_act_ckpt: ${scratch.use_act_checkpoint_geo_encoder} + layer: + _target_: sam3.model.encoder.TransformerEncoderLayer + activation: "relu" + 
d_model: ${scratch.d_model} + dim_feedforward: 2048 + dropout: ${scratch.encoder_dropout} + pos_enc_at_attn: false + pre_norm: True + pos_enc_at_cross_attn_queries: false + pos_enc_at_cross_attn_keys: true + self_attention: + _target_: sam3.model.attention.MultiheadAttention + attn_type: Vanilla + num_heads: 8 + dropout: ${scratch.encoder_dropout} + embed_dim: ${scratch.d_model} + batch_first: False + cross_attention: + _target_: sam3.model.attention.MultiheadAttention + attn_type: Vanilla + num_heads: 8 + dropout: ${scratch.encoder_dropout} + embed_dim: ${scratch.d_model} + batch_first: False + add_cls: true + add_post_encode_proj: True + + boxRPB: "log" + dac: True + use_early_fusion: true + o2m_mask: false + num_feature_levels: 1 # > 1 not implemented + encoder_dropout: 0.1 + decoder_dropout: 0.1 + + tokenizer_ve: + _target_: sam3.model.tokenizer_ve.SimpleTokenizer + bpe_path: ${paths.bpe_path} + + + freeze_text_tower: False + freeze_image_tower: NoFreeze + vis_backbone_dp: 0.0 + # Activation checkpointing (Save memory) + use_act_checkpoint_vision_backbone: True + use_act_checkpoint_text_backbone: True + use_act_checkpoint_encoder: True + use_act_checkpoint_decoder: True + + loss: null + # Loss parameters + num_queries: 200 + presence_weight: 20.0 + loss_ce_weight: 20.0 + iabce_pos_weight: 5.0 + iabce_pos_focal: false + iabce_alpha: 0.25 + instance_query_loss_pad_scale_pos: 1.0 + use_o2m_matcher_on_o2m_aux: false + + # Model parameters + use_instance_query: true + d_model: 256 + pos_embed: + _target_: sam3.model.position_encoding.PositionEmbeddingSine + num_pos_feats: ${scratch.d_model} + normalize: true + scale: null + temperature: 10000 + + # Box processing + use_presence_eval: True + original_box_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessImage + max_dets_per_img: -1 # infinite detections + use_original_ids: true + use_original_sizes_box: true + use_presence: ${scratch.use_presence_eval} + + + # Matcher configuration + matcher: + _target_: sam3.train.matcher.BinaryHungarianMatcherV2 + focal: true + cost_class: 2.0 + cost_bbox: 5.0 + cost_giou: 2.0 + alpha: 0.25 + gamma: 2 + stable: False + scale_by_find_batch_size: True + + # Image processing parameters + resolution: 1008 + consistent_transform: False + max_ann_per_img: 200 + + # Normalization parameters + train_norm_mean: [0.5, 0.5, 0.5] + train_norm_std: [0.5, 0.5, 0.5] + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + # Training parameters + train_batch_size: 1 + val_batch_size: 1 + num_train_workers: 0 + num_val_workers: 0 + max_data_epochs: 40 + target_epoch_size: 1500 + hybrid_repeats: 1 + context_length: 2 + gather_pred_via_filesys: false + + # Learning rate and scheduler parameters + lr_scale: 0.1 + lr_transformer: ${times:8e-4,${scratch.lr_scale}} + lr_vision_backbone: ${times:2.5e-4,${scratch.lr_scale}} + lr_language_backbone: ${times:5e-5,${scratch.lr_scale}} + lrd_vision_backbone: 0.9 + wd: 0.1 + scheduler_timescale: 20 + scheduler_warmup: 20 + scheduler_cooldown: 20 + + + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + # _target_: sam3.train.trainer.Trainer + # skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + skip_first_val: True + max_epochs: ${scratch.max_data_epochs} + accelerator: cuda + seed_value: 123 + val_epoch_freq: 10 + mode: train + + 
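The learning rates in `scratch` above are written as `${times:<base>,${scratch.lr_scale}}`, so a single `lr_scale` value rescales the transformer, vision-backbone and language-backbone LRs together. A quick sketch of the resulting values, assuming a `times` resolver that simply multiplies its arguments (the actual resolver is registered elsewhere by sam3's training entry point):

```python
# Effective learning rates with lr_scale = 0.1, assuming `times` multiplies.
from omegaconf import OmegaConf

OmegaConf.register_new_resolver("times", lambda a, b: float(a) * float(b))

cfg = OmegaConf.create({
    "scratch": {
        "lr_scale": 0.1,
        "lr_transformer": "${times:8e-4,${scratch.lr_scale}}",
        "lr_vision_backbone": "${times:2.5e-4,${scratch.lr_scale}}",
        "lr_language_backbone": "${times:5e-5,${scratch.lr_scale}}",
    }
})

print(cfg.scratch.lr_transformer)        # ~8e-05
print(cfg.scratch.lr_vision_backbone)    # ~2.5e-05
print(cfg.scratch.lr_language_backbone)  # ~5e-06
```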
distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + all: ${odinw_train.loss} + default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + train: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + limit_ids: ${odinw_train.num_images} + transforms: ${odinw_train.train_transforms} + load_segmentation: ${scratch.enable_segmentation} + max_ann_per_img: 500000 + multiplier: 1 + max_train_queries: 50000 + max_val_queries: 50000 + training: true + use_caching: False + img_folder: ${paths.odinw_data_root}/${odinw_train.supercategory_tuple.train.img_folder} + ann_file: + _target_: sam3.eval.coco_reindex.reindex_coco_to_temp + input_json_path: ${paths.odinw_data_root}/${odinw_train.supercategory_tuple.train.json} + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.COCO_FROM_JSON + prompts: ${odinw35_prompts.${odinw_train.supercategory_tuple.name}} #${odinw_train.supercategory_tuple.name) + _partial_: true + shuffle: True + batch_size: ${scratch.train_batch_size} + num_workers: ${scratch.num_train_workers} + pin_memory: False + drop_last: True + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: all + with_seg_masks: ${scratch.enable_segmentation} + + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + load_segmentation: ${scratch.enable_segmentation} + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.COCO_FROM_JSON + prompts: ${odinw35_prompts.${odinw_train.supercategory_tuple.name}} + include_negatives: true + category_chunk_size: 20 # Note: Since we are doing AP +ve we need to include all categories! + _partial_: true + img_folder: ${paths.odinw_data_root}/${odinw_train.supercategory_tuple.val.img_folder} + ann_file: + _target_: sam3.eval.coco_reindex.reindex_coco_to_temp + input_json_path: ${paths.odinw_data_root}/${odinw_train.supercategory_tuple.val.json} + transforms: ${odinw_train.val_transforms} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: 1 + dict_key: odinw35 + with_seg_masks: ${scratch.enable_segmentation} + + model: + _target_: sam3.model_builder.build_sam3_image_model + bpe_path: ${paths.bpe_path} + device: cpus + eval_mode: false # Set to false if training + enable_segmentation: ${scratch.enable_segmentation} # Warning: Enable this if using segmentation. 
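The data blocks above select their dataset through nested OmegaConf interpolation: the submitit job-array index picks one entry out of `all_odinw_supercategories`, and that entry's `name` then keys into `odinw35_prompts`, whose values are serialized category lists (JSON for most datasets, `null` where the dataset's own categories are presumably used). The sketch below reproduces the idea on a trimmed stand-in config; the `string` resolver is registered here purely for illustration, mirroring whatever resolver sam3's trainer registers under that name.

```python
# Illustrative resolution of the job-array -> dataset -> prompts chain
# on a trimmed stand-in config (not the real file).
import json
from omegaconf import OmegaConf

OmegaConf.register_new_resolver("string", str)

cfg = OmegaConf.create({
    "submitit": {"job_array": {"task_index": 0}},
    "all_odinw_supercategories": [
        {"name": "AerialMaritimeDrone_large"},
        {"name": "Aquarium"},
    ],
    "odinw35_prompts": {
        "AerialMaritimeDrone_large": '[{"id": 1, "name": "boat", "supercategory": "movable-objects"}]',
        "Aquarium": None,
    },
    "supercategory_tuple": "${all_odinw_supercategories.${string:${submitit.job_array.task_index}}}",
    "prompts": "${odinw35_prompts.${supercategory_tuple.name}}",
})

print(cfg.supercategory_tuple.name)        # AerialMaritimeDrone_large (task_index 0)
print(json.loads(cfg.prompts)[0]["name"])  # boat
```

Launching the job array with `num_tasks: 13` therefore runs one task per ODinW dataset, each resolving its own image folder, annotation file and prompt list from the same config.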
+ + meters: + val: + odinw35: + detection: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "bbox" + dump_dir: ${launcher.experiment_log_dir}/dumps/odinw/${odinw_train.supercategory_tuple.name} + merge_predictions: True + postprocessor: ${scratch.original_box_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 100 + pred_file_evaluators: + - _target_: sam3.eval.coco_eval_offline.CocoEvaluatorOfflineWithPredFileEvaluators + gt_path: + _target_: sam3.eval.coco_reindex.reindex_coco_to_temp + input_json_path: ${paths.odinw_data_root}/${odinw_train.supercategory_tuple.val.json} + tide: False + iou_type: "bbox" + positive_split: False + + optim: + amp: + enabled: True + amp_dtype: bfloat16 + + optimizer: + _target_: torch.optim.AdamW + + gradient_clip: + _target_: sam3.train.optim.optimizer.GradientClipper + max_norm: 0.1 + norm_type: 2 + + param_group_modifiers: + - _target_: sam3.train.optim.optimizer.layer_decay_param_modifier + _partial_: True + layer_decay_value: ${scratch.lrd_vision_backbone} + apply_to: 'backbone.vision_backbone.trunk' + overrides: + - pattern: '*pos_embed*' + value: 1.0 + + options: + lr: + - scheduler: # transformer and class_embed + _target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler + base_lr: ${scratch.lr_transformer} + timescale: ${scratch.scheduler_timescale} + warmup_steps: ${scratch.scheduler_warmup} + cooldown_steps: ${scratch.scheduler_cooldown} + - scheduler: + _target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler + base_lr: ${scratch.lr_vision_backbone} + timescale: ${scratch.scheduler_timescale} + warmup_steps: ${scratch.scheduler_warmup} + cooldown_steps: ${scratch.scheduler_cooldown} + param_names: + - 'backbone.vision_backbone.*' + - scheduler: + _target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler + base_lr: ${scratch.lr_language_backbone} + timescale: ${scratch.scheduler_timescale} + warmup_steps: ${scratch.scheduler_warmup} + cooldown_steps: ${scratch.scheduler_cooldown} + param_names: + - 'backbone.language_backbone.*' + + weight_decay: + - scheduler: + _target_: fvcore.common.param_scheduler.ConstantParamScheduler + value: ${scratch.wd} + - scheduler: + _target_: fvcore.common.param_scheduler.ConstantParamScheduler + value: 0.0 + param_names: + - '*bias*' + module_cls_names: ['torch.nn.LayerNorm'] + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. 
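The `optim.options.lr` entries above attach an `InverseSquareRootParamScheduler` with separate base LRs per parameter group plus warmup and cooldown steps. The exact formulation lives in `sam3.train.optim.schedulers`; as a rough reference only, a common inverse-square-root schedule with linear warmup looks like the following (the cooldown handling is omitted here and may differ from sam3's implementation):

```python
# A common inverse-square-root LR schedule with linear warmup.
# Reference sketch only; sam3's scheduler may differ in details,
# e.g. how cooldown_steps is applied at the end of training.
import math

def inv_sqrt_lr(step, base_lr, timescale, warmup_steps):
    if step < warmup_steps:
        return base_lr * (step + 1) / warmup_steps           # linear warmup
    return base_lr * math.sqrt(timescale / (timescale + step - warmup_steps))

for step in [0, 10, 20, 100, 1000]:
    print(step, inv_sqrt_lr(step, base_lr=8e-5, timescale=20, warmup_steps=20))
```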
+ + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/${odinw_train.supercategory_tuple.name} + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 1 + gpus_per_node: 2 + experiment_log_dir: null #${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null + + # task_index: 2 + # Uncomment for job array configuration + job_array: + num_tasks: 13 + task_index: 0 + + +# ============================================================================ +# ODinW13 Supercategories +# ============================================================================ + +all_odinw_supercategories: + - name: AerialMaritimeDrone_large + val: + img_folder: AerialMaritimeDrone/large/test/ + json: AerialMaritimeDrone/large/test/annotations_without_background.json + train: + img_folder: AerialMaritimeDrone/large/train/ + json: AerialMaritimeDrone/large/train/${odinw_train.train_file}.json + - name: Aquarium + val: + img_folder: Aquarium/Aquarium Combined.v2-raw-1024.coco/test/ + json: Aquarium/Aquarium Combined.v2-raw-1024.coco/test/annotations_without_background.json + train: + img_folder: Aquarium/Aquarium Combined.v2-raw-1024.coco/train/ + json: Aquarium/Aquarium Combined.v2-raw-1024.coco/train/${odinw_train.train_file}.json + - name: CottontailRabbits + val: + img_folder: CottontailRabbits/test/ + json: CottontailRabbits/test/annotations_without_background.json + train: + img_folder: CottontailRabbits/train/ + json: CottontailRabbits/train/${odinw_train.train_file}.json + - name: EgoHands_generic + val: + img_folder: EgoHands/generic/test/ + json: EgoHands/generic/test/annotations_without_background.json + train: + img_folder: EgoHands/generic/train/ + json: EgoHands/generic/train/${odinw_train.train_file}.json + - name: NorthAmericaMushrooms + val: + img_folder: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/test/ + json: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/test/annotations_without_background.json + train: + img_folder: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/train/ + json: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/train/${odinw_train.train_file}.json + - name: Packages + val: + img_folder: Packages/Raw/test/ + json: Packages/Raw/test/annotations_without_background.json + train: + img_folder: Packages/Raw/train/ + json: Packages/Raw/train/${odinw_train.train_file}.json + - name: PascalVOC + val: + img_folder: PascalVOC/valid/ + json: PascalVOC/valid/annotations_without_background.json + train: + img_folder: PascalVOC/train/ + json: PascalVOC/train/${odinw_train.train_file}.json + - name: Raccoon + val: + img_folder: Raccoon/Raccoon.v2-raw.coco/test/ + json: Raccoon/Raccoon.v2-raw.coco/test/annotations_without_background.json + train: + img_folder: Raccoon/Raccoon.v2-raw.coco/train/ + json: Raccoon/Raccoon.v2-raw.coco/train/${odinw_train.train_file}.json + - name: ShellfishOpenImages + val: + img_folder: ShellfishOpenImages/raw/test/ + json: 
ShellfishOpenImages/raw/test/annotations_without_background.json + train: + img_folder: ShellfishOpenImages/raw/train/ + json: ShellfishOpenImages/raw/train/${odinw_train.train_file}.json + - name: VehiclesOpenImages + val: + img_folder: VehiclesOpenImages/416x416/test/ + json: VehiclesOpenImages/416x416/test/annotations_without_background.json + train: + img_folder: VehiclesOpenImages/416x416/train/ + json: VehiclesOpenImages/416x416/train/${odinw_train.train_file}.json + - name: pistols + val: + img_folder: pistols/export/ + json: pistols/export/test_annotations_without_background.json + train: + img_folder: pistols/export/ + json: pistols/export/${odinw_train.train_file}.json + - name: pothole + val: + img_folder: pothole/test/ + json: pothole/test/annotations_without_background.json + train: + img_folder: pothole/train/ + json: pothole/train/${odinw_train.train_file}.json + - name: thermalDogsAndPeople + val: + img_folder: thermalDogsAndPeople/test/ + json: thermalDogsAndPeople/test/annotations_without_background.json + train: + img_folder: thermalDogsAndPeople/train/ + json: thermalDogsAndPeople/train/${odinw_train.train_file}.json + + +odinw35_prompts: + AerialMaritimeDrone_large: '[{"id": 1, "name": "boat", "supercategory": "movable-objects"}, + {"id": 2, "name": "car", "supercategory": "movable-objects"}, {"id": 3, "name": "dock", + "supercategory": "movable-objects"}, {"id": 4, "name": "jet ski", "supercategory": "movable-objects"}, + {"id": 5, "name": "boat lift", "supercategory": "movable-objects"}]' + Aquarium: null + CottontailRabbits: null + EgoHands_generic: null + NorthAmericaMushrooms: '[{''id'': 1, ''name'': + ''chicken of the woods'', ''supercategory'': ''mushroom''}, {''id'': 2, ''name'': ''chanterelle'', ''supercategory'': ''mushroom''}]' + Packages: null + PascalVOC: null + Raccoon: null + ShellfishOpenImages: null + VehiclesOpenImages: null + pistols: null + pothole: null + thermalDogsAndPeople: null diff --git a/detect_tools/sam3/sam3/train/configs/odinw13/odinw_visual_only.yaml b/detect_tools/sam3/sam3/train/configs/odinw13/odinw_visual_only.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e724f2d871e3af412078d744220f781a11b6f56f --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/odinw13/odinw_visual_only.yaml @@ -0,0 +1,256 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +# python sam3/train/train.py -c configs/odinw_text_only.yaml --use-cluster 1 --partition ${PARTITION} --account ${ACCOUNT} --qos ${QoS} + +paths: + odinw_data_root: + experiment_log_dir: + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + + +supercategory_tuple: ${all_odinw_supercategories.${string:${submitit.job_array.task_index}}} +# Validation transforms pipeline +val_transforms: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} + max_size: + _target_: sam3.train.transforms.basic.get_random_resize_max_size + size: ${scratch.resolution} + square: true + consistent_transform: False + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + - _target_: 
sam3.train.transforms.filter_query_transforms.TextQueryToVisual + keep_text_queries: false # Note: set this to false if you only want visual + probability: 1.0 # always + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + enable_segmentation: True + # Box processing + use_presence_eval: True + original_box_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessImage + max_dets_per_img: -1 # infinite detections + use_original_ids: true + use_original_sizes_box: true + use_presence: ${scratch.use_presence_eval} + + # Image processing parameters + resolution: 1008 + # Normalization parameters + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + # Training parameters + val_batch_size: 2 + num_val_workers: 0 + gather_pred_via_filesys: false + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + max_epochs: 1 + accelerator: cuda + seed_value: 123 + mode: val + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.COCO_FROM_JSON + prompts: ${odinw35_prompts.${supercategory_tuple.name}} + include_negatives: true + category_chunk_size: 20 # Note: Since we are doing AP +ve we need to include all categories! + _partial_: true + img_folder: ${paths.odinw_data_root}/${supercategory_tuple.val.img_folder} + ann_file: + _target_: sam3.eval.coco_reindex.reindex_coco_to_temp + input_json_path: ${paths.odinw_data_root}/${supercategory_tuple.val.json} + transforms: ${val_transforms} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: 1 + dict_key: odinw35 + + model: + _target_: sam3.model_builder.build_sam3_image_model + bpe_path: ${paths.bpe_path} + device: cpus + eval_mode: true # Set to false if training + enable_segmentation: ${scratch.enable_segmentation} # Warning: Enable this if using segmentation. + + meters: + val: + odinw35: + detection: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "bbox" + dump_dir: ${launcher.experiment_log_dir}/dumps/roboflow/${supercategory_tuple.name} + merge_predictions: True + postprocessor: ${scratch.original_box_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 100 + pred_file_evaluators: + - _target_: sam3.eval.coco_eval_offline.CocoEvaluatorOfflineWithPredFileEvaluators + gt_path: + _target_: sam3.eval.coco_reindex.reindex_coco_to_temp + input_json_path: ${paths.odinw_data_root}/${supercategory_tuple.val.json} + tide: False + iou_type: "bbox" + positive_split: true + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. 
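As in the other eval configs, the `val_transforms` earlier in this file resize each image to fit a 1008-pixel square, convert it to a tensor and normalize with mean/std 0.5. The sam3 `*API` transforms operate on its query/target structures, but for a single PIL image the effect is roughly the pipeline below; this is an approximation of the configured parameters, not the actual `RandomResizeAPI`/`ToTensorAPI`/`NormalizeAPI` implementations, which also transform boxes and masks.

```python
# Rough single-image equivalent of the val_transforms above:
# scale so the longer side is 1008, to tensor, normalize to roughly [-1, 1].
from PIL import Image
import torchvision.transforms.functional as F

def preprocess(img, resolution=1008, mean=0.5, std=0.5):
    w, h = img.size
    scale = resolution / max(w, h)                      # fit inside a 1008x1008 square
    img = F.resize(img, [round(h * scale), round(w * scale)])
    tensor = F.to_tensor(img)
    return F.normalize(tensor, mean=[mean] * 3, std=[std] * 3)

img = Image.new("RGB", (1920, 1080))                    # placeholder image
print(preprocess(img).shape)                            # torch.Size([3, 567, 1008])
```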
+ + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/${supercategory_tuple.name} + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 1 + gpus_per_node: 2 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null + + job_array: + num_tasks: 13 + task_index: 0 + +# ============================================================================ +# ODinW13 Supercategories +# ============================================================================ + +all_odinw_supercategories: + - name: AerialMaritimeDrone_large + val: + img_folder: AerialMaritimeDrone/large/test/ + json: AerialMaritimeDrone/large/test/annotations_without_background.json + - name: Aquarium + val: + img_folder: Aquarium/Aquarium Combined.v2-raw-1024.coco/test/ + json: Aquarium/Aquarium Combined.v2-raw-1024.coco/test/annotations_without_background.json + - name: CottontailRabbits + val: + img_folder: CottontailRabbits/test/ + json: CottontailRabbits/test/annotations_without_background.json + - name: EgoHands_generic + val: + img_folder: EgoHands/generic/test/ + json: EgoHands/generic/test/annotations_without_background.json + - name: NorthAmericaMushrooms + val: + img_folder: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/test/ + json: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/test/annotations_without_background.json + - name: Packages + val: + img_folder: Packages/Raw/test/ + json: Packages/Raw/test/annotations_without_background.json + - name: PascalVOC + val: + img_folder: PascalVOC/valid/ + json: PascalVOC/valid/annotations_without_background.json + - name: Raccoon + val: + img_folder: Raccoon/Raccoon.v2-raw.coco/test/ + json: Raccoon/Raccoon.v2-raw.coco/test/annotations_without_background.json + - name: ShellfishOpenImages + val: + img_folder: ShellfishOpenImages/raw/test/ + json: ShellfishOpenImages/raw/test/annotations_without_background.json + - name: VehiclesOpenImages + val: + img_folder: VehiclesOpenImages/416x416/test/ + json: VehiclesOpenImages/416x416/test/annotations_without_background.json + - name: pistols + val: + img_folder: pistols/export/ + json: pistols/export/test_annotations_without_background.json + - name: pothole + val: + img_folder: pothole/test/ + json: pothole/test/annotations_without_background.json + - name: thermalDogsAndPeople + val: + img_folder: thermalDogsAndPeople/test/ + json: thermalDogsAndPeople/test/annotations_without_background.json + + +odinw35_prompts: + AerialMaritimeDrone_large: '[{"id": 1, "name": "boat", "supercategory": "movable-objects"}, + {"id": 2, "name": "car", "supercategory": "movable-objects"}, {"id": 3, "name": "dock", + "supercategory": "movable-objects"}, {"id": 4, "name": "jet ski", "supercategory": "movable-objects"}, + {"id": 5, "name": "boat lift", "supercategory": "movable-objects"}]' + Aquarium: null + CottontailRabbits: null + EgoHands_generic: null + NorthAmericaMushrooms: '[{''id'': 1, ''name'': + ''chicken of the woods'', ''supercategory'': 
''mushroom''}, {''id'': 2, ''name'': ''chanterelle'', ''supercategory'': ''mushroom''}]' + Packages: null + PascalVOC: null + Raccoon: null + ShellfishOpenImages: null + VehiclesOpenImages: null + pistols: null + pothole: null + thermalDogsAndPeople: null diff --git a/detect_tools/sam3/sam3/train/configs/roboflow_v100/roboflow_v100_eval.yaml b/detect_tools/sam3/sam3/train/configs/roboflow_v100/roboflow_v100_eval.yaml new file mode 100644 index 0000000000000000000000000000000000000000..361e622bc0dd57ce361b449972d61d7dad6042e4 --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/roboflow_v100/roboflow_v100_eval.yaml @@ -0,0 +1,539 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +paths: + roboflow_vl_100_root: + experiment_log_dir: + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + +# Roboflow dataset configuration +roboflow_train: + num_images: 100 # Note: This is the number of images used for training. If null, all images are used. + supercategory: ${all_roboflow_supercategories.${string:${submitit.job_array.task_index}}} + + # Training transforms pipeline + train_transforms: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries + query_filter: + _target_: sam3.train.transforms.filter_query_transforms.FilterCrowds + - _target_: sam3.train.transforms.point_sampling.RandomizeInputBbox + box_noise_std: 0.1 + box_noise_max: 20 + - _target_: sam3.train.transforms.segmentation.DecodeRle + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: + _target_: sam3.train.transforms.basic.get_random_resize_scales + size: ${scratch.resolution} + min_size: 480 + rounded: false + max_size: + _target_: sam3.train.transforms.basic.get_random_resize_max_size + size: ${scratch.resolution} + square: true + consistent_transform: ${scratch.consistent_transform} + - _target_: sam3.train.transforms.basic_for_api.PadToSizeAPI + size: ${scratch.resolution} + consistent_transform: ${scratch.consistent_transform} + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries + query_filter: + _target_: sam3.train.transforms.filter_query_transforms.FilterEmptyTargets + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.train_norm_mean} + std: ${scratch.train_norm_std} + - _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries + query_filter: + _target_: sam3.train.transforms.filter_query_transforms.FilterEmptyTargets + - _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries + query_filter: + _target_: sam3.train.transforms.filter_query_transforms.FilterFindQueriesWithTooManyOut + max_num_objects: ${scratch.max_ann_per_img} + + # Validation transforms pipeline + val_transforms: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} + max_size: + _target_: sam3.train.transforms.basic.get_random_resize_max_size + size: ${scratch.resolution} + square: true + consistent_transform: False + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: 
sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.train_norm_mean} + std: ${scratch.train_norm_std} + + # loss config (no mask loss) + loss: + _target_: sam3.train.loss.sam3_loss.Sam3LossWrapper + matcher: ${scratch.matcher} + o2m_weight: 2.0 + o2m_matcher: + _target_: sam3.train.matcher.BinaryOneToManyMatcher + alpha: 0.3 + threshold: 0.4 + topk: 4 + use_o2m_matcher_on_o2m_aux: false # Another option is true + loss_fns_find: + - _target_: sam3.train.loss.loss_fns.Boxes + weight_dict: + loss_bbox: 5.0 + loss_giou: 2.0 + - _target_: sam3.train.loss.loss_fns.IABCEMdetr + weak_loss: False + weight_dict: + loss_ce: 20.0 # Another option is 100.0 + presence_loss: 20.0 + pos_weight: 10.0 # Another option is 5.0 + alpha: 0.25 + gamma: 2 + use_presence: True # Change + pos_focal: false + pad_n_queries: 200 + pad_scale_pos: 1.0 + + loss_fn_semantic_seg: null + scale_by_find_batch_size: ${scratch.scale_by_find_batch_size} + + + # NOTE: Loss to be used for training in case of segmentation + # loss: + # _target_: sam3.train.loss.sam3_loss.Sam3LossWrapper + # matcher: ${scratch.matcher} + # o2m_weight: 2.0 + # o2m_matcher: + # _target_: sam3.train.matcher.BinaryOneToManyMatcher + # alpha: 0.3 + # threshold: 0.4 + # topk: 4 + # use_o2m_matcher_on_o2m_aux: false + # loss_fns_find: + # - _target_: sam3.train.loss.loss_fns.Boxes + # weight_dict: + # loss_bbox: 5.0 + # loss_giou: 2.0 + # - _target_: sam3.train.loss.loss_fns.IABCEMdetr + # weak_loss: False + # weight_dict: + # loss_ce: 20.0 # Another option is 100.0 + # presence_loss: 20.0 + # pos_weight: 10.0 # Another option is 5.0 + # alpha: 0.25 + # gamma: 2 + # use_presence: True # Change + # pos_focal: false + # pad_n_queries: 200 + # pad_scale_pos: 1.0 + # - _target_: sam3.train.loss.loss_fns.Masks + # focal_alpha: 0.25 + # focal_gamma: 2.0 + # weight_dict: + # loss_mask: 200.0 + # loss_dice: 10.0 + # compute_aux: false + # loss_fn_semantic_seg: + # _target_: sam3.losses.loss_fns.SemanticSegCriterion + # presence_head: True + # presence_loss: False # Change + # focal: True + # focal_alpha: 0.6 + # focal_gamma: 2.0 + # downsample: False + # weight_dict: + # loss_semantic_seg: 20.0 + # loss_semantic_presence: 1.0 + # loss_semantic_dice: 30.0 + # scale_by_find_batch_size: ${scratch.scale_by_find_batch_size} + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + enable_segmentation: False # NOTE: This is the number of queries used for segmentation + # Model parameters + d_model: 256 + pos_embed: + _target_: sam3.model.position_encoding.PositionEmbeddingSine + num_pos_feats: ${scratch.d_model} + normalize: true + scale: null + temperature: 10000 + + # Box processing + use_presence_eval: True + original_box_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessImage + max_dets_per_img: -1 # infinite detections + use_original_ids: true + use_original_sizes_box: true + use_presence: ${scratch.use_presence_eval} + + # Matcher configuration + matcher: + _target_: sam3.train.matcher.BinaryHungarianMatcherV2 + focal: true # with `focal: true` it is equivalent to BinaryFocalHungarianMatcher + cost_class: 2.0 + cost_bbox: 5.0 + cost_giou: 2.0 + alpha: 0.25 + gamma: 2 + stable: False + scale_by_find_batch_size: True + + # Image processing parameters + resolution: 1008 + consistent_transform: False + max_ann_per_img: 200 + + # Normalization parameters + train_norm_mean: [0.5, 0.5, 
0.5] + train_norm_std: [0.5, 0.5, 0.5] + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + # Training parameters + num_train_workers: 10 + num_val_workers: 0 + max_data_epochs: 20 + target_epoch_size: 1500 + hybrid_repeats: 1 + context_length: 2 + gather_pred_via_filesys: false + + # Learning rate and scheduler parameters + lr_scale: 0.1 + lr_transformer: ${times:8e-4,${scratch.lr_scale}} + lr_vision_backbone: ${times:2.5e-4,${scratch.lr_scale}} + lr_language_backbone: ${times:5e-5,${scratch.lr_scale}} + lrd_vision_backbone: 0.9 + wd: 0.1 + scheduler_timescale: 20 + scheduler_warmup: 20 + scheduler_cooldown: 20 + + val_batch_size: 1 + collate_fn_val: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: roboflow100 + with_seg_masks: ${scratch.enable_segmentation} # Note: Set this to true if using segmentation masks! + + gradient_accumulation_steps: 1 + train_batch_size: 1 + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: all + with_seg_masks: ${scratch.enable_segmentation} # Note: Set this to true if using segmentation masks! + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + skip_first_val: True + max_epochs: 20 + accelerator: cuda + seed_value: 123 + val_epoch_freq: 10 + mode: val + gradient_accumulation_steps: ${scratch.gradient_accumulation_steps} + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + all: ${roboflow_train.loss} + default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + train: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + limit_ids: ${roboflow_train.num_images} + transforms: ${roboflow_train.train_transforms} + load_segmentation: ${scratch.enable_segmentation} + max_ann_per_img: 500000 + multiplier: 1 + max_train_queries: 50000 + max_val_queries: 50000 + training: true + use_caching: False + img_folder: ${paths.roboflow_vl_100_root}/${roboflow_train.supercategory}/train/ + ann_file: ${paths.roboflow_vl_100_root}/${roboflow_train.supercategory}/train/_annotations.coco.json + + shuffle: True + batch_size: ${scratch.train_batch_size} + num_workers: ${scratch.num_train_workers} + pin_memory: True + drop_last: True + collate_fn: ${scratch.collate_fn} + + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + load_segmentation: ${scratch.enable_segmentation} + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.COCO_FROM_JSON + include_negatives: true + category_chunk_size: 2 # Note: You can increase this based on the memory of your GPU. 
+ _partial_: true + img_folder: ${paths.roboflow_vl_100_root}/${roboflow_train.supercategory}/test/ + ann_file: ${paths.roboflow_vl_100_root}/${roboflow_train.supercategory}/test/_annotations.coco.json + transforms: ${roboflow_train.val_transforms} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: True + drop_last: False + collate_fn: ${scratch.collate_fn_val} + + + model: + _target_: sam3.model_builder.build_sam3_image_model + bpe_path: ${paths.bpe_path} + device: cpus + eval_mode: true + enable_segmentation: ${scratch.enable_segmentation} # Warning: Enable this if using segmentation. + + meters: + val: + roboflow100: + detection: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "bbox" + dump_dir: ${launcher.experiment_log_dir}/dumps/roboflow/${roboflow_train.supercategory} + merge_predictions: True + postprocessor: ${scratch.original_box_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 100 + pred_file_evaluators: + - _target_: sam3.eval.coco_eval_offline.CocoEvaluatorOfflineWithPredFileEvaluators + gt_path: ${paths.roboflow_vl_100_root}/${roboflow_train.supercategory}/test/_annotations.coco.json + tide: False + iou_type: "bbox" + + optim: + amp: + enabled: True + amp_dtype: bfloat16 + + optimizer: + _target_: torch.optim.AdamW + + gradient_clip: + _target_: sam3.train.optim.optimizer.GradientClipper + max_norm: 0.1 + norm_type: 2 + + param_group_modifiers: + - _target_: sam3.train.optim.optimizer.layer_decay_param_modifier + _partial_: True + layer_decay_value: ${scratch.lrd_vision_backbone} + apply_to: 'backbone.vision_backbone.trunk' + overrides: + - pattern: '*pos_embed*' + value: 1.0 + + options: + lr: + - scheduler: # transformer and class_embed + _target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler + base_lr: ${scratch.lr_transformer} + timescale: ${scratch.scheduler_timescale} + warmup_steps: ${scratch.scheduler_warmup} + cooldown_steps: ${scratch.scheduler_cooldown} + - scheduler: + _target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler + base_lr: ${scratch.lr_vision_backbone} + timescale: ${scratch.scheduler_timescale} + warmup_steps: ${scratch.scheduler_warmup} + cooldown_steps: ${scratch.scheduler_cooldown} + param_names: + - 'backbone.vision_backbone.*' + - scheduler: + _target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler + base_lr: ${scratch.lr_language_backbone} + timescale: ${scratch.scheduler_timescale} + warmup_steps: ${scratch.scheduler_warmup} + cooldown_steps: ${scratch.scheduler_cooldown} + param_names: + - 'backbone.language_backbone.*' + + weight_decay: + - scheduler: + _target_: fvcore.common.param_scheduler.ConstantParamScheduler + value: ${scratch.wd} + - scheduler: + _target_: fvcore.common.param_scheduler.ConstantParamScheduler + value: 0.0 + param_names: + - '*bias*' + module_cls_names: ['torch.nn.LayerNorm'] + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. 
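The `weight_decay` options above apply `wd: 0.1` everywhere except biases and `LayerNorm` parameters, which get 0.0. sam3 builds these groups through its scheduler/option machinery; a conventional, illustrative way to realize the same split when constructing AdamW directly is:

```python
# Split parameters into weight-decay / no-weight-decay groups for AdamW,
# mirroring wd=0.1 by default with 0.0 for biases and LayerNorm parameters.
# Illustrative only; not sam3's optimizer builder.
import torch
import torch.nn as nn

def build_optimizer(model, lr=8e-5, wd=0.1):
    decay, no_decay = [], []
    for module in model.modules():
        for name, param in module.named_parameters(recurse=False):
            if not param.requires_grad:
                continue
            if name.endswith("bias") or isinstance(module, nn.LayerNorm):
                no_decay.append(param)
            else:
                decay.append(param)
    return torch.optim.AdamW(
        [{"params": decay, "weight_decay": wd},
         {"params": no_decay, "weight_decay": 0.0}],
        lr=lr,
    )

model = nn.Sequential(nn.Linear(16, 16), nn.LayerNorm(16))
opt = build_optimizer(model)
print([len(g["params"]) for g in opt.param_groups])  # [1, 3]
```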
+ + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/${roboflow_train.supercategory} + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 1 + gpus_per_node: 2 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null + # Uncomment for job array configuration + job_array: + num_tasks: 100 + task_index: 0 + +# ============================================================================ +# Available Roboflow Supercategories (for reference) +# ============================================================================ + +all_roboflow_supercategories: + - -grccs + - zebrasatasturias + - cod-mw-warzone + - canalstenosis + - label-printing-defect-version-2 + - new-defects-in-wood + - orionproducts + - aquarium-combined + - varroa-mites-detection--test-set + - clashroyalechardetector + - stomata-cells + - halo-infinite-angel-videogame + - pig-detection + - urine-analysis1 + - aerial-sheep + - orgharvest + - actions + - mahjong + - liver-disease + - needle-base-tip-min-max + - wheel-defect-detection + - aircraft-turnaround-dataset + - xray + - wildfire-smoke + - spinefrxnormalvindr + - ufba-425 + - speech-bubbles-detection + - train + - pill + - truck-movement + - car-logo-detection + - inbreast + - sea-cucumbers-new-tiles + - uavdet-small + - penguin-finder-seg + - aerial-airport + - bibdetection + - taco-trash-annotations-in-context + - bees + - recode-waste + - screwdetectclassification + - wine-labels + - aerial-cows + - into-the-vale + - gwhd2021 + - lacrosse-object-detection + - defect-detection + - dataconvert + - x-ray-id + - ball + - tube + - 2024-frc + - crystal-clean-brain-tumors-mri-dataset + - grapes-5 + - human-detection-in-floods + - buoy-onboarding + - apoce-aerial-photographs-for-object-detection-of-construction-equipment + - l10ul502 + - floating-waste + - deeppcb + - ism-band-packet-detection + - weeds4 + - invoice-processing + - thermal-cheetah + - tomatoes-2 + - marine-sharks + - peixos-fish + - sssod + - aerial-pool + - countingpills + - asphaltdistressdetection + - roboflow-trained-dataset + - everdaynew + - underwater-objects + - soda-bottles + - dentalai + - jellyfish + - deepfruits + - activity-diagrams + - circuit-voltages + - all-elements + - macro-segmentation + - exploratorium-daphnia + - signatures + - conveyor-t-shirts + - fruitjes + - grass-weeds + - infraredimageofpowerequipment + - 13-lkc01 + - wb-prova + - flir-camera-objects + - paper-parts + - football-player-detection + - trail-camera + - smd-components + - water-meter + - nih-xray + - the-dreidel-project + - electric-pylon-detection-in-rsi + - cable-damage diff --git a/detect_tools/sam3/sam3/train/configs/roboflow_v100/roboflow_v100_full_ft_100_images.yaml b/detect_tools/sam3/sam3/train/configs/roboflow_v100/roboflow_v100_full_ft_100_images.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6b95f628431298ec637a9b29c892bb0e2599bd80 --- /dev/null +++ 
b/detect_tools/sam3/sam3/train/configs/roboflow_v100/roboflow_v100_full_ft_100_images.yaml @@ -0,0 +1,539 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +paths: + roboflow_vl_100_root: + experiment_log_dir: + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + +# Roboflow dataset configuration +roboflow_train: + num_images: 100 # Note: This is the number of images used for training. If null, all images are used. + supercategory: ${all_roboflow_supercategories.${string:${submitit.job_array.task_index}}} + + # Training transforms pipeline + train_transforms: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries + query_filter: + _target_: sam3.train.transforms.filter_query_transforms.FilterCrowds + - _target_: sam3.train.transforms.point_sampling.RandomizeInputBbox + box_noise_std: 0.1 + box_noise_max: 20 + - _target_: sam3.train.transforms.segmentation.DecodeRle + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: + _target_: sam3.train.transforms.basic.get_random_resize_scales + size: ${scratch.resolution} + min_size: 480 + rounded: false + max_size: + _target_: sam3.train.transforms.basic.get_random_resize_max_size + size: ${scratch.resolution} + square: true + consistent_transform: ${scratch.consistent_transform} + - _target_: sam3.train.transforms.basic_for_api.PadToSizeAPI + size: ${scratch.resolution} + consistent_transform: ${scratch.consistent_transform} + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries + query_filter: + _target_: sam3.train.transforms.filter_query_transforms.FilterEmptyTargets + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.train_norm_mean} + std: ${scratch.train_norm_std} + - _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries + query_filter: + _target_: sam3.train.transforms.filter_query_transforms.FilterEmptyTargets + - _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries + query_filter: + _target_: sam3.train.transforms.filter_query_transforms.FilterFindQueriesWithTooManyOut + max_num_objects: ${scratch.max_ann_per_img} + + # Validation transforms pipeline + val_transforms: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} + max_size: + _target_: sam3.train.transforms.basic.get_random_resize_max_size + size: ${scratch.resolution} + square: true + consistent_transform: False + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.train_norm_mean} + std: ${scratch.train_norm_std} + + # loss config (no mask loss) + loss: + _target_: sam3.train.loss.sam3_loss.Sam3LossWrapper + matcher: ${scratch.matcher} + o2m_weight: 2.0 + o2m_matcher: + _target_: sam3.train.matcher.BinaryOneToManyMatcher + alpha: 0.3 + threshold: 0.4 + topk: 4 + use_o2m_matcher_on_o2m_aux: false # Another option is true + loss_fns_find: + - _target_: sam3.train.loss.loss_fns.Boxes + weight_dict: + loss_bbox: 5.0 + loss_giou: 2.0 + - 
_target_: sam3.train.loss.loss_fns.IABCEMdetr + weak_loss: False + weight_dict: + loss_ce: 20.0 # Another option is 100.0 + presence_loss: 20.0 + pos_weight: 10.0 # Another option is 5.0 + alpha: 0.25 + gamma: 2 + use_presence: True # Change + pos_focal: false + pad_n_queries: 200 + pad_scale_pos: 1.0 + + loss_fn_semantic_seg: null + scale_by_find_batch_size: ${scratch.scale_by_find_batch_size} + + + # NOTE: Loss to be used for training in case of segmentation + # loss: + # _target_: sam3.train.loss.sam3_loss.Sam3LossWrapper + # matcher: ${scratch.matcher} + # o2m_weight: 2.0 + # o2m_matcher: + # _target_: sam3.train.matcher.BinaryOneToManyMatcher + # alpha: 0.3 + # threshold: 0.4 + # topk: 4 + # use_o2m_matcher_on_o2m_aux: false + # loss_fns_find: + # - _target_: sam3.train.loss.loss_fns.Boxes + # weight_dict: + # loss_bbox: 5.0 + # loss_giou: 2.0 + # - _target_: sam3.train.loss.loss_fns.IABCEMdetr + # weak_loss: False + # weight_dict: + # loss_ce: 20.0 # Another option is 100.0 + # presence_loss: 20.0 + # pos_weight: 10.0 # Another option is 5.0 + # alpha: 0.25 + # gamma: 2 + # use_presence: True # Change + # pos_focal: false + # pad_n_queries: 200 + # pad_scale_pos: 1.0 + # - _target_: sam3.train.loss.loss_fns.Masks + # focal_alpha: 0.25 + # focal_gamma: 2.0 + # weight_dict: + # loss_mask: 200.0 + # loss_dice: 10.0 + # compute_aux: false + # loss_fn_semantic_seg: + # _target_: sam3.losses.loss_fns.SemanticSegCriterion + # presence_head: True + # presence_loss: False # Change + # focal: True + # focal_alpha: 0.6 + # focal_gamma: 2.0 + # downsample: False + # weight_dict: + # loss_semantic_seg: 20.0 + # loss_semantic_presence: 1.0 + # loss_semantic_dice: 30.0 + # scale_by_find_batch_size: ${scratch.scale_by_find_batch_size} + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + enable_segmentation: False # NOTE: This is the number of queries used for segmentation + # Model parameters + d_model: 256 + pos_embed: + _target_: sam3.model.position_encoding.PositionEmbeddingSine + num_pos_feats: ${scratch.d_model} + normalize: true + scale: null + temperature: 10000 + + # Box processing + use_presence_eval: True + original_box_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessImage + max_dets_per_img: -1 # infinite detections + use_original_ids: true + use_original_sizes_box: true + use_presence: ${scratch.use_presence_eval} + + # Matcher configuration + matcher: + _target_: sam3.train.matcher.BinaryHungarianMatcherV2 + focal: true # with `focal: true` it is equivalent to BinaryFocalHungarianMatcher + cost_class: 2.0 + cost_bbox: 5.0 + cost_giou: 2.0 + alpha: 0.25 + gamma: 2 + stable: False + scale_by_find_batch_size: True + + # Image processing parameters + resolution: 1008 + consistent_transform: False + max_ann_per_img: 200 + + # Normalization parameters + train_norm_mean: [0.5, 0.5, 0.5] + train_norm_std: [0.5, 0.5, 0.5] + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + # Training parameters + num_train_workers: 10 + num_val_workers: 0 + max_data_epochs: 20 + target_epoch_size: 1500 + hybrid_repeats: 1 + context_length: 2 + gather_pred_via_filesys: false + + # Learning rate and scheduler parameters + lr_scale: 0.1 + lr_transformer: ${times:8e-4,${scratch.lr_scale}} + lr_vision_backbone: ${times:2.5e-4,${scratch.lr_scale}} + lr_language_backbone: ${times:5e-5,${scratch.lr_scale}} + 
lrd_vision_backbone: 0.9 + wd: 0.1 + scheduler_timescale: 20 + scheduler_warmup: 20 + scheduler_cooldown: 20 + + val_batch_size: 1 + collate_fn_val: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: roboflow100 + with_seg_masks: ${scratch.enable_segmentation} # Note: Set this to true if using segmentation masks! + + gradient_accumulation_steps: 1 + train_batch_size: 1 + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: all + with_seg_masks: ${scratch.enable_segmentation} # Note: Set this to true if using segmentation masks! + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + skip_first_val: True + max_epochs: 20 + accelerator: cuda + seed_value: 123 + val_epoch_freq: 10 + mode: train + gradient_accumulation_steps: ${scratch.gradient_accumulation_steps} + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + all: ${roboflow_train.loss} + default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + train: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + limit_ids: ${roboflow_train.num_images} + transforms: ${roboflow_train.train_transforms} + load_segmentation: ${scratch.enable_segmentation} + max_ann_per_img: 500000 + multiplier: 1 + max_train_queries: 50000 + max_val_queries: 50000 + training: true + use_caching: False + img_folder: ${paths.roboflow_vl_100_root}/${roboflow_train.supercategory}/train/ + ann_file: ${paths.roboflow_vl_100_root}/${roboflow_train.supercategory}/train/_annotations.coco.json + + shuffle: True + batch_size: ${scratch.train_batch_size} + num_workers: ${scratch.num_train_workers} + pin_memory: True + drop_last: True + collate_fn: ${scratch.collate_fn} + + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + load_segmentation: ${scratch.enable_segmentation} + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.COCO_FROM_JSON + include_negatives: true + category_chunk_size: 2 # Note: You can increase this based on the memory of your GPU. + _partial_: true + img_folder: ${paths.roboflow_vl_100_root}/${roboflow_train.supercategory}/test/ + ann_file: ${paths.roboflow_vl_100_root}/${roboflow_train.supercategory}/test/_annotations.coco.json + transforms: ${roboflow_train.val_transforms} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: True + drop_last: False + collate_fn: ${scratch.collate_fn_val} + + + model: + _target_: sam3.model_builder.build_sam3_image_model + bpe_path: ${paths.bpe_path} + device: cpus + eval_mode: false + enable_segmentation: ${scratch.enable_segmentation} # Warning: Enable this if using segmentation. 
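+  # NOTE: The meter below dumps per-image bbox predictions in COCO format to
+  # ${launcher.experiment_log_dir}/dumps/roboflow/<supercategory> and then scores them
+  # offline against the test-split _annotations.coco.json (COCO-style AP, at most 100
+  # detections per image). The optimizer section that follows applies layer-wise lr
+  # decay (0.9 per layer) to the vision trunk; with lr_scale: 0.1 the transformer lr
+  # resolves to 8e-4 * 0.1 = 8e-5 (assuming the custom `times` resolver multiplies its
+  # two arguments).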
+ + meters: + val: + roboflow100: + detection: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "bbox" + dump_dir: ${launcher.experiment_log_dir}/dumps/roboflow/${roboflow_train.supercategory} + merge_predictions: True + postprocessor: ${scratch.original_box_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 100 + pred_file_evaluators: + - _target_: sam3.eval.coco_eval_offline.CocoEvaluatorOfflineWithPredFileEvaluators + gt_path: ${paths.roboflow_vl_100_root}/${roboflow_train.supercategory}/test/_annotations.coco.json + tide: False + iou_type: "bbox" + + optim: + amp: + enabled: True + amp_dtype: bfloat16 + + optimizer: + _target_: torch.optim.AdamW + + gradient_clip: + _target_: sam3.train.optim.optimizer.GradientClipper + max_norm: 0.1 + norm_type: 2 + + param_group_modifiers: + - _target_: sam3.train.optim.optimizer.layer_decay_param_modifier + _partial_: True + layer_decay_value: ${scratch.lrd_vision_backbone} + apply_to: 'backbone.vision_backbone.trunk' + overrides: + - pattern: '*pos_embed*' + value: 1.0 + + options: + lr: + - scheduler: # transformer and class_embed + _target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler + base_lr: ${scratch.lr_transformer} + timescale: ${scratch.scheduler_timescale} + warmup_steps: ${scratch.scheduler_warmup} + cooldown_steps: ${scratch.scheduler_cooldown} + - scheduler: + _target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler + base_lr: ${scratch.lr_vision_backbone} + timescale: ${scratch.scheduler_timescale} + warmup_steps: ${scratch.scheduler_warmup} + cooldown_steps: ${scratch.scheduler_cooldown} + param_names: + - 'backbone.vision_backbone.*' + - scheduler: + _target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler + base_lr: ${scratch.lr_language_backbone} + timescale: ${scratch.scheduler_timescale} + warmup_steps: ${scratch.scheduler_warmup} + cooldown_steps: ${scratch.scheduler_cooldown} + param_names: + - 'backbone.language_backbone.*' + + weight_decay: + - scheduler: + _target_: fvcore.common.param_scheduler.ConstantParamScheduler + value: ${scratch.wd} + - scheduler: + _target_: fvcore.common.param_scheduler.ConstantParamScheduler + value: 0.0 + param_names: + - '*bias*' + module_cls_names: ['torch.nn.LayerNorm'] + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. 
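+  # NOTE: The submitit job array further below (num_tasks: 100) launches one task per
+  # entry of all_roboflow_supercategories; submitit.job_array.task_index is interpolated
+  # into roboflow_train.supercategory (the custom `string:` resolver presumably casts
+  # the index so it can be used as a list lookup), e.g. task_index: 0 selects the first
+  # dataset in the list. account/partition/qos are left null and should be filled in
+  # for your cluster when use_cluster is True.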
+ + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/${roboflow_train.supercategory} + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 1 + gpus_per_node: 2 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null + # Uncomment for job array configuration + job_array: + num_tasks: 100 + task_index: 0 + +# ============================================================================ +# Available Roboflow Supercategories (for reference) +# ============================================================================ + +all_roboflow_supercategories: + - -grccs + - zebrasatasturias + - cod-mw-warzone + - canalstenosis + - label-printing-defect-version-2 + - new-defects-in-wood + - orionproducts + - aquarium-combined + - varroa-mites-detection--test-set + - clashroyalechardetector + - stomata-cells + - halo-infinite-angel-videogame + - pig-detection + - urine-analysis1 + - aerial-sheep + - orgharvest + - actions + - mahjong + - liver-disease + - needle-base-tip-min-max + - wheel-defect-detection + - aircraft-turnaround-dataset + - xray + - wildfire-smoke + - spinefrxnormalvindr + - ufba-425 + - speech-bubbles-detection + - train + - pill + - truck-movement + - car-logo-detection + - inbreast + - sea-cucumbers-new-tiles + - uavdet-small + - penguin-finder-seg + - aerial-airport + - bibdetection + - taco-trash-annotations-in-context + - bees + - recode-waste + - screwdetectclassification + - wine-labels + - aerial-cows + - into-the-vale + - gwhd2021 + - lacrosse-object-detection + - defect-detection + - dataconvert + - x-ray-id + - ball + - tube + - 2024-frc + - crystal-clean-brain-tumors-mri-dataset + - grapes-5 + - human-detection-in-floods + - buoy-onboarding + - apoce-aerial-photographs-for-object-detection-of-construction-equipment + - l10ul502 + - floating-waste + - deeppcb + - ism-band-packet-detection + - weeds4 + - invoice-processing + - thermal-cheetah + - tomatoes-2 + - marine-sharks + - peixos-fish + - sssod + - aerial-pool + - countingpills + - asphaltdistressdetection + - roboflow-trained-dataset + - everdaynew + - underwater-objects + - soda-bottles + - dentalai + - jellyfish + - deepfruits + - activity-diagrams + - circuit-voltages + - all-elements + - macro-segmentation + - exploratorium-daphnia + - signatures + - conveyor-t-shirts + - fruitjes + - grass-weeds + - infraredimageofpowerequipment + - 13-lkc01 + - wb-prova + - flir-camera-objects + - paper-parts + - football-player-detection + - trail-camera + - smd-components + - water-meter + - nih-xray + - the-dreidel-project + - electric-pylon-detection-in-rsi + - cable-damage diff --git a/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_sav_test.yaml b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_sav_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b5bed477a1d6fa5e54797db0177eb08eb279d2e5 --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_sav_test.yaml @@ 
-0,0 +1,174 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +paths: + + dump_file_name: saco_veval_sav_test + experiment_log_dir: + ytvis_json: /saco_veval_sav_test.json + ytvis_dir : + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + num_videos: null + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + vid_mask_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessNullOp + + use_presence_eval: True + + video_transforms_val: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.segmentation.DecodeRle + # resize the image to 1024x1024 resolution + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} # originally `resolution: 1024` + square: true + consistent_transform: true + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + + # Model parameters + d_model: 256 + + # Image processing parameters + resolution: 1008 + + # Normalization parameters + train_norm_mean: [0.5, 0.5, 0.5] + train_norm_std: [0.5, 0.5, 0.5] + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + val_batch_size: 1 + num_val_workers: 0 + max_data_epochs: 20 + hybrid_repeats: 1 + gather_pred_via_filesys: false + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + skip_first_val: True + max_epochs: ${scratch.max_data_epochs} + accelerator: cuda + seed_value: 123 + val_epoch_freq: 10 + mode: val + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + all: + _target_: sam3.train.loss.sam3_loss.DummyLoss + default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + train: null + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset + limit_ids: ${paths.num_videos} + img_folder: ${paths.ytvis_dir} + ann_file: ${paths.ytvis_json} + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP + _partial_: true + + transforms: ${scratch.video_transforms_val} + max_ann_per_img: 100000 # filtered in transforms + max_val_queries: 100000 + multiplier: 1 + load_segmentation: true + training: false + + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: True + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: ytvis_val + with_seg_masks: true + + + model: + _target_: sam3.model_builder.build_sam3_video_model + bpe_path: ${paths.bpe_path} + has_presence_token: True + geo_encoder_use_img_cross_attn: True + apply_temporal_disambiguation: True + + meters: + val: + ytvis_val: + pred_file: # key + _target_: 
sam3.eval.ytvis_eval.YTVISResultsWriter + dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json + postprocessor: ${scratch.vid_mask_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + + optim: + amp: + enabled: True + amp_dtype: bfloat16 + + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. + + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/ + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 8 + gpus_per_node: 8 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null diff --git a/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_sav_test_noheur.yaml b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_sav_test_noheur.yaml new file mode 100644 index 0000000000000000000000000000000000000000..abc3289c6cb6606e7ec7607d167bccf04150751a --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_sav_test_noheur.yaml @@ -0,0 +1,174 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +paths: + + dump_file_name: saco_veval_sav_test + experiment_log_dir: + ytvis_json: /saco_veval_sav_test.json + ytvis_dir : + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + num_videos: null + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + vid_mask_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessNullOp + + use_presence_eval: True + + video_transforms_val: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.segmentation.DecodeRle + # resize the image to 1024x1024 resolution + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} # originally `resolution: 1024` + square: true + consistent_transform: true + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + + # Model parameters + d_model: 256 + + # Image processing parameters + resolution: 1008 + + # Normalization parameters + train_norm_mean: [0.5, 0.5, 0.5] + train_norm_std: [0.5, 0.5, 0.5] + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + val_batch_size: 1 + num_val_workers: 0 + max_data_epochs: 20 + hybrid_repeats: 1 + gather_pred_via_filesys: false + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + 
_target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + skip_first_val: True + max_epochs: ${scratch.max_data_epochs} + accelerator: cuda + seed_value: 123 + val_epoch_freq: 10 + mode: val + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + all: + _target_: sam3.train.loss.sam3_loss.DummyLoss + default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + train: null + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset + limit_ids: ${paths.num_videos} + img_folder: ${paths.ytvis_dir} + ann_file: ${paths.ytvis_json} + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP + _partial_: true + + transforms: ${scratch.video_transforms_val} + max_ann_per_img: 100000 # filtered in transforms + max_val_queries: 100000 + multiplier: 1 + load_segmentation: true + training: false + + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: True + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: ytvis_val + with_seg_masks: true + + + model: + _target_: sam3.model_builder.build_sam3_video_model + bpe_path: ${paths.bpe_path} + has_presence_token: True + geo_encoder_use_img_cross_attn: True + apply_temporal_disambiguation: False + + meters: + val: + ytvis_val: + pred_file: # key + _target_: sam3.eval.ytvis_eval.YTVISResultsWriter + dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json + postprocessor: ${scratch.vid_mask_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + + optim: + amp: + enabled: True + amp_dtype: bfloat16 + + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. 
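+  # NOTE: This config mirrors saco_veval_sav_test.yaml except that
+  # apply_temporal_disambiguation is set to False above, i.e. the temporal
+  # disambiguation heuristic is disabled (presumably the "_noheur" suffix).
+  # With mode: val, data.train: null and DummyLoss everywhere, this is an
+  # evaluation-only run: no parameters are updated and, with skip_saving_ckpts: true,
+  # no new checkpoints are written.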
+ + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/ + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 8 + gpus_per_node: 8 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null diff --git a/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_sav_val.yaml b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_sav_val.yaml new file mode 100644 index 0000000000000000000000000000000000000000..25c6e609217a637e3633e8d21d503cdb56e0f720 --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_sav_val.yaml @@ -0,0 +1,174 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +paths: + + dump_file_name: saco_veval_sav_val + experiment_log_dir: + ytvis_json: /saco_veval_sav_val.json + ytvis_dir : + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + num_videos: null + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + vid_mask_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessNullOp + + use_presence_eval: True + + video_transforms_val: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.segmentation.DecodeRle + # resize the image to 1024x1024 resolution + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} # originally `resolution: 1024` + square: true + consistent_transform: true + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + + # Model parameters + d_model: 256 + + # Image processing parameters + resolution: 1008 + + # Normalization parameters + train_norm_mean: [0.5, 0.5, 0.5] + train_norm_std: [0.5, 0.5, 0.5] + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + val_batch_size: 1 + num_val_workers: 0 + max_data_epochs: 20 + hybrid_repeats: 1 + gather_pred_via_filesys: false + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + skip_first_val: True + max_epochs: ${scratch.max_data_epochs} + accelerator: cuda + seed_value: 123 + val_epoch_freq: 10 + mode: val + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + all: + _target_: sam3.train.loss.sam3_loss.DummyLoss + default: + _target_: 
sam3.train.loss.sam3_loss.DummyLoss + + data: + train: null + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset + limit_ids: ${paths.num_videos} + img_folder: ${paths.ytvis_dir} + ann_file: ${paths.ytvis_json} + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP + _partial_: true + + transforms: ${scratch.video_transforms_val} + max_ann_per_img: 100000 # filtered in transforms + max_val_queries: 100000 + multiplier: 1 + load_segmentation: true + training: false + + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: True + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: ytvis_val + with_seg_masks: true + + + model: + _target_: sam3.model_builder.build_sam3_video_model + bpe_path: ${paths.bpe_path} + has_presence_token: True + geo_encoder_use_img_cross_attn: True + apply_temporal_disambiguation: True + + meters: + val: + ytvis_val: + pred_file: # key + _target_: sam3.eval.ytvis_eval.YTVISResultsWriter + dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json + postprocessor: ${scratch.vid_mask_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + + optim: + amp: + enabled: True + amp_dtype: bfloat16 + + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. + + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/ + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 8 + gpus_per_node: 8 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null diff --git a/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_sav_val_noheur.yaml b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_sav_val_noheur.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9a89ebaa3eb295534a8d09e17b4f6003cadb8a12 --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_sav_val_noheur.yaml @@ -0,0 +1,174 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +paths: + + dump_file_name: saco_veval_sav_val + experiment_log_dir: + ytvis_json: /saco_veval_sav_val.json + ytvis_dir : + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + num_videos: null + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + vid_mask_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessNullOp + + 
use_presence_eval: True + + video_transforms_val: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.segmentation.DecodeRle + # resize the image to 1024x1024 resolution + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} # originally `resolution: 1024` + square: true + consistent_transform: true + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + + # Model parameters + d_model: 256 + + # Image processing parameters + resolution: 1008 + + # Normalization parameters + train_norm_mean: [0.5, 0.5, 0.5] + train_norm_std: [0.5, 0.5, 0.5] + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + val_batch_size: 1 + num_val_workers: 0 + max_data_epochs: 20 + hybrid_repeats: 1 + gather_pred_via_filesys: false + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + skip_first_val: True + max_epochs: ${scratch.max_data_epochs} + accelerator: cuda + seed_value: 123 + val_epoch_freq: 10 + mode: val + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + all: + _target_: sam3.train.loss.sam3_loss.DummyLoss + default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + train: null + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset + limit_ids: ${paths.num_videos} + img_folder: ${paths.ytvis_dir} + ann_file: ${paths.ytvis_json} + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP + _partial_: true + + transforms: ${scratch.video_transforms_val} + max_ann_per_img: 100000 # filtered in transforms + max_val_queries: 100000 + multiplier: 1 + load_segmentation: true + training: false + + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: True + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: ytvis_val + with_seg_masks: true + + + model: + _target_: sam3.model_builder.build_sam3_video_model + bpe_path: ${paths.bpe_path} + has_presence_token: True + geo_encoder_use_img_cross_attn: True + apply_temporal_disambiguation: False + + meters: + val: + ytvis_val: + pred_file: # key + _target_: sam3.eval.ytvis_eval.YTVISResultsWriter + dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json + postprocessor: ${scratch.vid_mask_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + + optim: + amp: + enabled: True + amp_dtype: bfloat16 + + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. 
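+  # NOTE: Predictions are written by YTVISResultsWriter to
+  # ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json in YTVIS-style
+  # JSON; vid_mask_postprocessor is a pass-through (PostProcessNullOp), so the
+  # predicted video masks are dumped as produced by the model.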
+ + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/ + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 8 + gpus_per_node: 8 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null diff --git a/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_smartglasses_test.yaml b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_smartglasses_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fdc818538143bbd69b11df026cb072cc4a385483 --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_smartglasses_test.yaml @@ -0,0 +1,174 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +paths: + + dump_file_name: saco_veval_smartglasses_test + experiment_log_dir: + ytvis_json: /saco_veval_smartglasses_test.json + ytvis_dir : + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + num_videos: null + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + vid_mask_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessNullOp + + use_presence_eval: True + + video_transforms_val: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.segmentation.DecodeRle + # resize the image to 1024x1024 resolution + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} # originally `resolution: 1024` + square: true + consistent_transform: true + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + + # Model parameters + d_model: 256 + + # Image processing parameters + resolution: 1008 + + # Normalization parameters + train_norm_mean: [0.5, 0.5, 0.5] + train_norm_std: [0.5, 0.5, 0.5] + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + val_batch_size: 1 + num_val_workers: 0 + max_data_epochs: 20 + hybrid_repeats: 1 + gather_pred_via_filesys: false + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + skip_first_val: True + max_epochs: ${scratch.max_data_epochs} + accelerator: cuda + seed_value: 123 + val_epoch_freq: 10 + mode: val + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + all: + _target_: 
sam3.train.loss.sam3_loss.DummyLoss + default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + train: null + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset + limit_ids: ${paths.num_videos} + img_folder: ${paths.ytvis_dir} + ann_file: ${paths.ytvis_json} + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP + _partial_: true + + transforms: ${scratch.video_transforms_val} + max_ann_per_img: 100000 # filtered in transforms + max_val_queries: 100000 + multiplier: 1 + load_segmentation: true + training: false + + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: True + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: ytvis_val + with_seg_masks: true + + + model: + _target_: sam3.model_builder.build_sam3_video_model + bpe_path: ${paths.bpe_path} + has_presence_token: True + geo_encoder_use_img_cross_attn: True + apply_temporal_disambiguation: True + + meters: + val: + ytvis_val: + pred_file: # key + _target_: sam3.eval.ytvis_eval.YTVISResultsWriter + dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json + postprocessor: ${scratch.vid_mask_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + + optim: + amp: + enabled: True + amp_dtype: bfloat16 + + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. + + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/ + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 8 + gpus_per_node: 8 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null diff --git a/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_smartglasses_test_noheur.yaml b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_smartglasses_test_noheur.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2d6150e5cf9adde269a98061bff2332fb4f73866 --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_smartglasses_test_noheur.yaml @@ -0,0 +1,174 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +paths: + + dump_file_name: saco_veval_smartglasses_test + experiment_log_dir: + ytvis_json: /saco_veval_smartglasses_test.json + ytvis_dir : + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + num_videos: null + +# ============================================================================ +# Different helper parameters and functions +# 
============================================================================ +scratch: + vid_mask_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessNullOp + + use_presence_eval: True + + video_transforms_val: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.segmentation.DecodeRle + # resize the image to 1024x1024 resolution + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} # originally `resolution: 1024` + square: true + consistent_transform: true + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + + # Model parameters + d_model: 256 + + # Image processing parameters + resolution: 1008 + + # Normalization parameters + train_norm_mean: [0.5, 0.5, 0.5] + train_norm_std: [0.5, 0.5, 0.5] + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + val_batch_size: 1 + num_val_workers: 0 + max_data_epochs: 20 + hybrid_repeats: 1 + gather_pred_via_filesys: false + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + skip_first_val: True + max_epochs: ${scratch.max_data_epochs} + accelerator: cuda + seed_value: 123 + val_epoch_freq: 10 + mode: val + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + all: + _target_: sam3.train.loss.sam3_loss.DummyLoss + default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + train: null + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset + limit_ids: ${paths.num_videos} + img_folder: ${paths.ytvis_dir} + ann_file: ${paths.ytvis_json} + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP + _partial_: true + + transforms: ${scratch.video_transforms_val} + max_ann_per_img: 100000 # filtered in transforms + max_val_queries: 100000 + multiplier: 1 + load_segmentation: true + training: false + + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: True + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: ytvis_val + with_seg_masks: true + + + model: + _target_: sam3.model_builder.build_sam3_video_model + bpe_path: ${paths.bpe_path} + has_presence_token: True + geo_encoder_use_img_cross_attn: True + apply_temporal_disambiguation: False + + meters: + val: + ytvis_val: + pred_file: # key + _target_: sam3.eval.ytvis_eval.YTVISResultsWriter + dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json + postprocessor: ${scratch.vid_mask_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + + optim: + amp: + enabled: True + amp_dtype: bfloat16 + + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. 
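+  # NOTE: paths.num_videos: null evaluates the full split; set it to an integer to
+  # subsample the dataset via limit_ids (handy for quick smoke tests). The launcher
+  # requests 8 nodes x 8 GPUs for this evaluation, and gather_pred_via_filesys: false
+  # gathers predictions across ranks without going through the filesystem (as the
+  # flag name suggests).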
+ + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/ + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 8 + gpus_per_node: 8 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null diff --git a/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_smartglasses_val.yaml b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_smartglasses_val.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a9bffda589381a9785eecf5cef4b3958a7840816 --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_smartglasses_val.yaml @@ -0,0 +1,174 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +paths: + + dump_file_name: saco_veval_smartglasses_val + experiment_log_dir: + ytvis_json: /saco_veval_smartglasses_val.json + ytvis_dir : + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + num_videos: null + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + vid_mask_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessNullOp + + use_presence_eval: True + + video_transforms_val: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.segmentation.DecodeRle + # resize the image to 1024x1024 resolution + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} # originally `resolution: 1024` + square: true + consistent_transform: true + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + + # Model parameters + d_model: 256 + + # Image processing parameters + resolution: 1008 + + # Normalization parameters + train_norm_mean: [0.5, 0.5, 0.5] + train_norm_std: [0.5, 0.5, 0.5] + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + val_batch_size: 1 + num_val_workers: 0 + max_data_epochs: 20 + hybrid_repeats: 1 + gather_pred_via_filesys: false + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + skip_first_val: True + max_epochs: ${scratch.max_data_epochs} + accelerator: cuda + seed_value: 123 + val_epoch_freq: 10 + mode: val + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + all: + _target_: sam3.train.loss.sam3_loss.DummyLoss 
+ default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + train: null + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset + limit_ids: ${paths.num_videos} + img_folder: ${paths.ytvis_dir} + ann_file: ${paths.ytvis_json} + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP + _partial_: true + + transforms: ${scratch.video_transforms_val} + max_ann_per_img: 100000 # filtered in transforms + max_val_queries: 100000 + multiplier: 1 + load_segmentation: true + training: false + + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: True + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: ytvis_val + with_seg_masks: true + + + model: + _target_: sam3.model_builder.build_sam3_video_model + bpe_path: ${paths.bpe_path} + has_presence_token: True + geo_encoder_use_img_cross_attn: True + apply_temporal_disambiguation: True + + meters: + val: + ytvis_val: + pred_file: # key + _target_: sam3.eval.ytvis_eval.YTVISResultsWriter + dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json + postprocessor: ${scratch.vid_mask_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + + optim: + amp: + enabled: True + amp_dtype: bfloat16 + + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. + + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/ + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 8 + gpus_per_node: 8 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null diff --git a/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_smartglasses_val_noheur.yaml b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_smartglasses_val_noheur.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e1f64436d606cbd6fc82f103397e147f3e65345f --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_smartglasses_val_noheur.yaml @@ -0,0 +1,174 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +paths: + + dump_file_name: saco_veval_smartglasses_val + experiment_log_dir: + ytvis_json: /saco_veval_smartglasses_val.json + ytvis_dir : + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + num_videos: null + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + vid_mask_postprocessor: + 
_target_: sam3.eval.postprocessors.PostProcessNullOp + + use_presence_eval: True + + video_transforms_val: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.segmentation.DecodeRle + # resize the image to 1024x1024 resolution + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} # originally `resolution: 1024` + square: true + consistent_transform: true + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + + # Model parameters + d_model: 256 + + # Image processing parameters + resolution: 1008 + + # Normalization parameters + train_norm_mean: [0.5, 0.5, 0.5] + train_norm_std: [0.5, 0.5, 0.5] + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + val_batch_size: 1 + num_val_workers: 0 + max_data_epochs: 20 + hybrid_repeats: 1 + gather_pred_via_filesys: false + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + skip_first_val: True + max_epochs: ${scratch.max_data_epochs} + accelerator: cuda + seed_value: 123 + val_epoch_freq: 10 + mode: val + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + all: + _target_: sam3.train.loss.sam3_loss.DummyLoss + default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + train: null + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset + limit_ids: ${paths.num_videos} + img_folder: ${paths.ytvis_dir} + ann_file: ${paths.ytvis_json} + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP + _partial_: true + + transforms: ${scratch.video_transforms_val} + max_ann_per_img: 100000 # filtered in transforms + max_val_queries: 100000 + multiplier: 1 + load_segmentation: true + training: false + + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: True + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: ytvis_val + with_seg_masks: true + + + model: + _target_: sam3.model_builder.build_sam3_video_model + bpe_path: ${paths.bpe_path} + has_presence_token: True + geo_encoder_use_img_cross_attn: True + apply_temporal_disambiguation: False + + meters: + val: + ytvis_val: + pred_file: # key + _target_: sam3.eval.ytvis_eval.YTVISResultsWriter + dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json + postprocessor: ${scratch.vid_mask_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + + optim: + amp: + enabled: True + amp_dtype: bfloat16 + + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. 
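+  # NOTE: Both saco_veval_smartglasses_val.yaml and this "_noheur" variant share
+  # dump_file_name: saco_veval_smartglasses_val, so their prediction files collide if
+  # the two runs use the same experiment_log_dir; point them at different log
+  # directories (or change dump_file_name) to keep the outputs separate.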
+ + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/ + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 8 + gpus_per_node: 8 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null diff --git a/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_yt1b_test.yaml b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_yt1b_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..71e50347e0ffa390bac299385d8be93e6bdffedc --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_yt1b_test.yaml @@ -0,0 +1,174 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +paths: + + dump_file_name: saco_veval_yt1b_test + experiment_log_dir: + ytvis_json: /saco_veval_yt1b_test.json + ytvis_dir : + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + num_videos: null + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + vid_mask_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessNullOp + + use_presence_eval: True + + video_transforms_val: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.segmentation.DecodeRle + # resize the image to 1024x1024 resolution + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} # originally `resolution: 1024` + square: true + consistent_transform: true + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + + # Model parameters + d_model: 256 + + # Image processing parameters + resolution: 1008 + + # Normalization parameters + train_norm_mean: [0.5, 0.5, 0.5] + train_norm_std: [0.5, 0.5, 0.5] + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + val_batch_size: 1 + num_val_workers: 0 + max_data_epochs: 20 + hybrid_repeats: 1 + gather_pred_via_filesys: false + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + skip_first_val: True + max_epochs: ${scratch.max_data_epochs} + accelerator: cuda + seed_value: 123 + val_epoch_freq: 10 + mode: val + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + all: + _target_: sam3.train.loss.sam3_loss.DummyLoss + default: + _target_: 
sam3.train.loss.sam3_loss.DummyLoss + + data: + train: null + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset + limit_ids: ${paths.num_videos} + img_folder: ${paths.ytvis_dir} + ann_file: ${paths.ytvis_json} + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP + _partial_: true + + transforms: ${scratch.video_transforms_val} + max_ann_per_img: 100000 # filtered in transforms + max_val_queries: 100000 + multiplier: 1 + load_segmentation: true + training: false + + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: True + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: ytvis_val + with_seg_masks: true + + + model: + _target_: sam3.model_builder.build_sam3_video_model + bpe_path: ${paths.bpe_path} + has_presence_token: True + geo_encoder_use_img_cross_attn: True + apply_temporal_disambiguation: True + + meters: + val: + ytvis_val: + pred_file: # key + _target_: sam3.eval.ytvis_eval.YTVISResultsWriter + dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json + postprocessor: ${scratch.vid_mask_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + + optim: + amp: + enabled: True + amp_dtype: bfloat16 + + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. + + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/ + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 8 + gpus_per_node: 8 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null diff --git a/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_yt1b_test_noheur.yaml b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_yt1b_test_noheur.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f8df6aeec0b574a767255b027aa68a8336c60e8e --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_yt1b_test_noheur.yaml @@ -0,0 +1,174 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +paths: + + dump_file_name: saco_veval_yt1b_test + experiment_log_dir: + ytvis_json: /saco_veval_yt1b_test.json + ytvis_dir : + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + num_videos: null + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + vid_mask_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessNullOp + + 
use_presence_eval: True + + video_transforms_val: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.segmentation.DecodeRle + # resize the image to 1024x1024 resolution + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} # originally `resolution: 1024` + square: true + consistent_transform: true + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + + # Model parameters + d_model: 256 + + # Image processing parameters + resolution: 1008 + + # Normalization parameters + train_norm_mean: [0.5, 0.5, 0.5] + train_norm_std: [0.5, 0.5, 0.5] + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + val_batch_size: 1 + num_val_workers: 0 + max_data_epochs: 20 + hybrid_repeats: 1 + gather_pred_via_filesys: false + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + skip_first_val: True + max_epochs: ${scratch.max_data_epochs} + accelerator: cuda + seed_value: 123 + val_epoch_freq: 10 + mode: val + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + all: + _target_: sam3.train.loss.sam3_loss.DummyLoss + default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + train: null + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset + limit_ids: ${paths.num_videos} + img_folder: ${paths.ytvis_dir} + ann_file: ${paths.ytvis_json} + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP + _partial_: true + + transforms: ${scratch.video_transforms_val} + max_ann_per_img: 100000 # filtered in transforms + max_val_queries: 100000 + multiplier: 1 + load_segmentation: true + training: false + + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: True + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: ytvis_val + with_seg_masks: true + + + model: + _target_: sam3.model_builder.build_sam3_video_model + bpe_path: ${paths.bpe_path} + has_presence_token: True + geo_encoder_use_img_cross_attn: True + apply_temporal_disambiguation: False + + meters: + val: + ytvis_val: + pred_file: # key + _target_: sam3.eval.ytvis_eval.YTVISResultsWriter + dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json + postprocessor: ${scratch.vid_mask_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + + optim: + amp: + enabled: True + amp_dtype: bfloat16 + + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. 
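+
+  # Note: this "_noheur" variant sets apply_temporal_disambiguation: False in the
+  # model block above, i.e. the temporal disambiguation heuristic is disabled;
+  # the other settings mirror the corresponding eval config without "_noheur".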
+ + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/ + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 8 + gpus_per_node: 8 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null diff --git a/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_yt1b_val.yaml b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_yt1b_val.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5816952ebb253b098b5ebe0a4e41edf4ffed57ee --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_yt1b_val.yaml @@ -0,0 +1,174 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +paths: + + dump_file_name: saco_veval_yt1b_val + experiment_log_dir: + ytvis_json: /saco_veval_yt1b_val.json + ytvis_dir : + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + num_videos: null + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + vid_mask_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessNullOp + + use_presence_eval: True + + video_transforms_val: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.segmentation.DecodeRle + # resize the image to 1024x1024 resolution + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} # originally `resolution: 1024` + square: true + consistent_transform: true + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + + # Model parameters + d_model: 256 + + # Image processing parameters + resolution: 1008 + + # Normalization parameters + train_norm_mean: [0.5, 0.5, 0.5] + train_norm_std: [0.5, 0.5, 0.5] + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + val_batch_size: 1 + num_val_workers: 0 + max_data_epochs: 20 + hybrid_repeats: 1 + gather_pred_via_filesys: false + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + skip_first_val: True + max_epochs: ${scratch.max_data_epochs} + accelerator: cuda + seed_value: 123 + val_epoch_freq: 10 + mode: val + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + all: + _target_: sam3.train.loss.sam3_loss.DummyLoss + default: + _target_: 
sam3.train.loss.sam3_loss.DummyLoss + + data: + train: null + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset + limit_ids: ${paths.num_videos} + img_folder: ${paths.ytvis_dir} + ann_file: ${paths.ytvis_json} + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP + _partial_: true + + transforms: ${scratch.video_transforms_val} + max_ann_per_img: 100000 # filtered in transforms + max_val_queries: 100000 + multiplier: 1 + load_segmentation: true + training: false + + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: True + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: ytvis_val + with_seg_masks: true + + + model: + _target_: sam3.model_builder.build_sam3_video_model + bpe_path: ${paths.bpe_path} + has_presence_token: True + geo_encoder_use_img_cross_attn: True + apply_temporal_disambiguation: True + + meters: + val: + ytvis_val: + pred_file: # key + _target_: sam3.eval.ytvis_eval.YTVISResultsWriter + dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json + postprocessor: ${scratch.vid_mask_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + + optim: + amp: + enabled: True + amp_dtype: bfloat16 + + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. + + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/ + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 8 + gpus_per_node: 8 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null diff --git a/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_yt1b_val_noheur.yaml b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_yt1b_val_noheur.yaml new file mode 100644 index 0000000000000000000000000000000000000000..374e5e22b91003b2c8bcbcf6f7dc9462ec3038c3 --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/saco_video_evals/saco_veval_yt1b_val_noheur.yaml @@ -0,0 +1,174 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +paths: + + dump_file_name: saco_veval_yt1b_val + experiment_log_dir: + ytvis_json: /saco_veval_yt1b_val.json + ytvis_dir : + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + num_videos: null + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + vid_mask_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessNullOp + + 
use_presence_eval: True + + video_transforms_val: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.segmentation.DecodeRle + # resize the image to 1024x1024 resolution + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} # originally `resolution: 1024` + square: true + consistent_transform: true + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + + # Model parameters + d_model: 256 + + # Image processing parameters + resolution: 1008 + + # Normalization parameters + train_norm_mean: [0.5, 0.5, 0.5] + train_norm_std: [0.5, 0.5, 0.5] + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + val_batch_size: 1 + num_val_workers: 0 + max_data_epochs: 20 + hybrid_repeats: 1 + gather_pred_via_filesys: false + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + skip_first_val: True + max_epochs: ${scratch.max_data_epochs} + accelerator: cuda + seed_value: 123 + val_epoch_freq: 10 + mode: val + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + all: + _target_: sam3.train.loss.sam3_loss.DummyLoss + default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + train: null + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset + limit_ids: ${paths.num_videos} + img_folder: ${paths.ytvis_dir} + ann_file: ${paths.ytvis_json} + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP + _partial_: true + + transforms: ${scratch.video_transforms_val} + max_ann_per_img: 100000 # filtered in transforms + max_val_queries: 100000 + multiplier: 1 + load_segmentation: true + training: false + + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: True + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: ytvis_val + with_seg_masks: true + + + model: + _target_: sam3.model_builder.build_sam3_video_model + bpe_path: ${paths.bpe_path} + has_presence_token: True + geo_encoder_use_img_cross_attn: True + apply_temporal_disambiguation: False + + meters: + val: + ytvis_val: + pred_file: # key + _target_: sam3.eval.ytvis_eval.YTVISResultsWriter + dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json + postprocessor: ${scratch.vid_mask_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + + optim: + amp: + enabled: True + amp_dtype: bfloat16 + + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. 
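+
+  # Illustrative only (assumption: this config is consumed through Hydra, so the
+  # empty entries under `paths` can be supplied as command-line overrides; the
+  # values below are placeholders):
+  #   paths.ytvis_dir=/path/to/yt1b/frames \
+  #   paths.bpe_path=assets/bpe_simple_vocab_16e6.txt.gz \
+  #   paths.experiment_log_dir=/path/to/experiment_logs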
+ + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/ + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 8 + gpus_per_node: 8 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null diff --git a/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_bdd100k.yaml b/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_bdd100k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e5587cfb76237bfa6db8b5467632b2691f876cdf --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_bdd100k.yaml @@ -0,0 +1,64 @@ +# @package _global_ +defaults: + - /configs/eval_base.yaml + - _self_ + +# ============================================================================ +# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct +# ============================================================================ +paths: + experiment_log_dir: ${paths.base_experiment_log_dir}/silver_bdd100k/ + coco_gt: ${paths.base_annotation_path_silver}/silver_bdd100k_merged_test.json + img_path: ${paths.silver_img_path}/bdd100k/ + + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP + _partial_: true + img_folder: ${paths.img_path} + ann_file: ${paths.coco_gt} + transforms: ${scratch.base_val_transform} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: silver_bdd100k + + meters: + val: + silver_bdd100k: # this key matches the "dict_key" in the dataloader's collate function + cgf1: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "segm" + dump_dir: ${launcher.experiment_log_dir}/dumps/silver_bdd100k + merge_predictions: True + postprocessor: ${scratch.mask_postprocessor_thresholded} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 1000000 # no limit + pred_file_evaluators: + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "bbox" + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "segm" diff --git a/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_droid.yaml b/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_droid.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..c0d62341ba915d5a04f9fc4d88d057aed15848f7 --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_droid.yaml @@ -0,0 +1,64 @@ +# @package _global_ +defaults: + - /configs/eval_base.yaml + - _self_ + +# ============================================================================ +# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct +# ============================================================================ +paths: + experiment_log_dir: ${paths.base_experiment_log_dir}/silver_droid/ + coco_gt: ${paths.base_annotation_path_silver}/silver_droid_merged_test.json + img_path: ${paths.silver_img_path}/droid/ + + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP + _partial_: true + img_folder: ${paths.img_path} + ann_file: ${paths.coco_gt} + transforms: ${scratch.base_val_transform} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: silver_droid + + meters: + val: + silver_droid: # this key matches the "dict_key" in the dataloader's collate function + cgf1: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "segm" + dump_dir: ${launcher.experiment_log_dir}/dumps/silver_droid + merge_predictions: True + postprocessor: ${scratch.mask_postprocessor_thresholded} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 1000000 # no limit + pred_file_evaluators: + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "bbox" + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "segm" diff --git a/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_ego4d.yaml b/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_ego4d.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d5a036d93d44a093755462cf748b2ed66a1e8a4f --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_ego4d.yaml @@ -0,0 +1,64 @@ +# @package _global_ +defaults: + - /configs/eval_base.yaml + - _self_ + +# ============================================================================ +# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct +# ============================================================================ +paths: + experiment_log_dir: ${paths.base_experiment_log_dir}/silver_ego4d/ + coco_gt: ${paths.base_annotation_path_silver}/silver_ego4d_merged_test.json + img_path: ${paths.silver_img_path}/ego4d/ + + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + 
_target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP + _partial_: true + img_folder: ${paths.img_path} + ann_file: ${paths.coco_gt} + transforms: ${scratch.base_val_transform} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: silver_ego4d + + meters: + val: + silver_ego4d: # this key matches the "dict_key" in the dataloader's collate function + cgf1: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "segm" + dump_dir: ${launcher.experiment_log_dir}/dumps/silver_ego4d + merge_predictions: True + postprocessor: ${scratch.mask_postprocessor_thresholded} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 1000000 # no limit + pred_file_evaluators: + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "bbox" + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "segm" diff --git a/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_fathomnet.yaml b/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_fathomnet.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b15d0c82328171d8ed0c9d4b52c35477813ca389 --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_fathomnet.yaml @@ -0,0 +1,64 @@ +# @package _global_ +defaults: + - /configs/eval_base.yaml + - _self_ + +# ============================================================================ +# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct +# ============================================================================ +paths: + experiment_log_dir: ${paths.base_experiment_log_dir}/silver_fathomnet/ + coco_gt: ${paths.base_annotation_path_silver}/silver_fathomnet_test.json + img_path: ${paths.silver_img_path}/fathomnet/ + + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP + _partial_: true + img_folder: ${paths.img_path} + ann_file: ${paths.coco_gt} + transforms: ${scratch.base_val_transform} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: silver_fathomnet + + meters: + val: + silver_fathomnet: # this key matches the "dict_key" in the dataloader's collate function + cgf1: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "segm" + dump_dir: ${launcher.experiment_log_dir}/dumps/silver_fathomnet + merge_predictions: True + postprocessor: ${scratch.mask_postprocessor_thresholded} + gather_pred_via_filesys: 
${scratch.gather_pred_via_filesys} + maxdets: 1000000 # no limit + pred_file_evaluators: + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "bbox" + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "segm" diff --git a/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_food_rec.yaml b/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_food_rec.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5158ff551e5d2babb1100ba1978e0da4613bac8d --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_food_rec.yaml @@ -0,0 +1,64 @@ +# @package _global_ +defaults: + - /configs/eval_base.yaml + - _self_ + +# ============================================================================ +# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct +# ============================================================================ +paths: + experiment_log_dir: ${paths.base_experiment_log_dir}/silver_food_rec/ + coco_gt: ${paths.base_annotation_path_silver}/silver_food_rec_merged_test.json + img_path: ${paths.silver_img_path}/food_rec/ + + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP + _partial_: true + img_folder: ${paths.img_path} + ann_file: ${paths.coco_gt} + transforms: ${scratch.base_val_transform} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: silver_food_rec + + meters: + val: + silver_food_rec: # this key matches the "dict_key" in the dataloader's collate function + cgf1: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "segm" + dump_dir: ${launcher.experiment_log_dir}/dumps/silver_food_rec + merge_predictions: True + postprocessor: ${scratch.mask_postprocessor_thresholded} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 1000000 # no limit + pred_file_evaluators: + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "bbox" + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "segm" diff --git a/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_geode.yaml b/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_geode.yaml new file mode 100644 index 0000000000000000000000000000000000000000..08f159fe9bc80072d8bd4a95f911bc70a555588d --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_geode.yaml @@ -0,0 +1,64 @@ +# @package _global_ +defaults: + - /configs/eval_base.yaml + - _self_ + +# ============================================================================ +# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct +# 
============================================================================ +paths: + experiment_log_dir: ${paths.base_experiment_log_dir}/silver_geode/ + coco_gt: ${paths.base_annotation_path_silver}/silver_geode_merged_test.json + img_path: ${paths.silver_img_path}/geode/ + + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP + _partial_: true + img_folder: ${paths.img_path} + ann_file: ${paths.coco_gt} + transforms: ${scratch.base_val_transform} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: silver_geode + + meters: + val: + silver_geode: # this key matches the "dict_key" in the dataloader's collate function + cgf1: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "segm" + dump_dir: ${launcher.experiment_log_dir}/dumps/silver_geode + merge_predictions: True + postprocessor: ${scratch.mask_postprocessor_thresholded} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 1000000 # no limit + pred_file_evaluators: + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "bbox" + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "segm" diff --git a/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_inaturalist.yaml b/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_inaturalist.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1d56d9758d8cb5711911b60278fe454c975a8456 --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_inaturalist.yaml @@ -0,0 +1,64 @@ +# @package _global_ +defaults: + - /configs/eval_base.yaml + - _self_ + +# ============================================================================ +# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct +# ============================================================================ +paths: + experiment_log_dir: ${paths.base_experiment_log_dir}/silver_inaturalist/ + coco_gt: ${paths.base_annotation_path_silver}/silver_inaturalist_merged_test.json + img_path: ${paths.silver_img_path}/inaturalist/ + + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP + _partial_: true + img_folder: ${paths.img_path} + ann_file: ${paths.coco_gt} + transforms: ${scratch.base_val_transform} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + 
pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: silver_inaturalist + + meters: + val: + silver_inaturalist: # this key matches the "dict_key" in the dataloader's collate function + cgf1: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "segm" + dump_dir: ${launcher.experiment_log_dir}/dumps/silver_inaturalist + merge_predictions: True + postprocessor: ${scratch.mask_postprocessor_thresholded} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 1000000 # no limit + pred_file_evaluators: + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "bbox" + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "segm" diff --git a/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_nga.yaml b/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_nga.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b2de0afed4289272ca36634a911a7a1d38e03aa3 --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_nga.yaml @@ -0,0 +1,64 @@ +# @package _global_ +defaults: + - /configs/eval_base.yaml + - _self_ + +# ============================================================================ +# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct +# ============================================================================ +paths: + experiment_log_dir: ${paths.base_experiment_log_dir}/silver_nga_art/ + coco_gt: ${paths.base_annotation_path_silver}/silver_nga_art_merged_test.json + img_path: ${paths.silver_img_path}/nga/ + + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP + _partial_: true + img_folder: ${paths.img_path} + ann_file: ${paths.coco_gt} + transforms: ${scratch.base_val_transform} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: silver_nga_art + + meters: + val: + silver_nga_art: # this key matches the "dict_key" in the dataloader's collate function + cgf1: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "segm" + dump_dir: ${launcher.experiment_log_dir}/dumps/silver_nga_art + merge_predictions: True + postprocessor: ${scratch.mask_postprocessor_thresholded} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 1000000 # no limit + pred_file_evaluators: + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "bbox" + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "segm" diff --git a/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_sav.yaml b/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_sav.yaml new file mode 100644 
index 0000000000000000000000000000000000000000..7ebbb0f2bbee9b06221a46d362b8ca719bed9b4b --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_sav.yaml @@ -0,0 +1,64 @@ +# @package _global_ +defaults: + - /configs/eval_base.yaml + - _self_ + +# ============================================================================ +# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct +# ============================================================================ +paths: + experiment_log_dir: ${paths.base_experiment_log_dir}/silver_sav/ + coco_gt: ${paths.base_annotation_path_silver}/silver_sav_merged_test.json + img_path: ${paths.silver_img_path}/sav/ + + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP + _partial_: true + img_folder: ${paths.img_path} + ann_file: ${paths.coco_gt} + transforms: ${scratch.base_val_transform} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: silver_sav + + meters: + val: + silver_sav: # this key matches the "dict_key" in the dataloader's collate function + cgf1: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "segm" + dump_dir: ${launcher.experiment_log_dir}/dumps/silver_sav + merge_predictions: True + postprocessor: ${scratch.mask_postprocessor_thresholded} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 1000000 # no limit + pred_file_evaluators: + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "bbox" + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "segm" diff --git a/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_yt1b.yaml b/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_yt1b.yaml new file mode 100644 index 0000000000000000000000000000000000000000..901bd3a050f4041364b36299fa01648ce576d0af --- /dev/null +++ b/detect_tools/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_yt1b.yaml @@ -0,0 +1,64 @@ +# @package _global_ +defaults: + - /configs/eval_base.yaml + - _self_ + +# ============================================================================ +# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct +# ============================================================================ +paths: + experiment_log_dir: ${paths.base_experiment_log_dir}/silver_yt1b/ + coco_gt: ${paths.base_annotation_path_silver}/silver_yt1b_merged_test.json + img_path: ${paths.silver_img_path}/yt1b/ + + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: 
sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP + _partial_: true + img_folder: ${paths.img_path} + ann_file: ${paths.coco_gt} + transforms: ${scratch.base_val_transform} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: silver_yt1b + + meters: + val: + silver_yt1b: # this key matches the "dict_key" in the dataloader's collate function + cgf1: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "segm" + dump_dir: ${launcher.experiment_log_dir}/dumps/silver_yt1b + merge_predictions: True + postprocessor: ${scratch.mask_postprocessor_thresholded} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 1000000 # no limit + pred_file_evaluators: + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "bbox" + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "segm" diff --git a/detect_tools/sam3/sam3/train/data/__init__.py b/detect_tools/sam3/sam3/train/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..46d37d2aaef52ec7de01999516a2b8c3e1fa4986 --- /dev/null +++ b/detect_tools/sam3/sam3/train/data/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved diff --git a/detect_tools/sam3/sam3/train/data/coco_json_loaders.py b/detect_tools/sam3/sam3/train/data/coco_json_loaders.py new file mode 100644 index 0000000000000000000000000000000000000000..4bdf4878f9e95f765184d804ec65d7cce69160ef --- /dev/null +++ b/detect_tools/sam3/sam3/train/data/coco_json_loaders.py @@ -0,0 +1,465 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import json +from collections import defaultdict +from typing import Dict, List, Tuple + +import torch +from pycocotools import mask as mask_util + + +# ============================================================================ +# Utility Functions +# ============================================================================ + + +def convert_boxlist_to_normalized_tensor(box_list, image_width, image_height): + """ + Converts a list of bounding boxes to a normalized PyTorch tensor. + + Args: + box_list (list of list or tuples): Each box is [x_min, y_min, x_max, y_max]. + image_width (int or float): Width of the image. + image_height (int or float): Height of the image. + + Returns: + torch.Tensor: Normalized tensor of shape (N, 4), values in [0, 1]. + """ + boxes = torch.tensor(box_list, dtype=torch.float32) + boxes[:, [0, 2]] /= image_width # x_min, x_max + boxes[:, [1, 3]] /= image_height # y_min, y_max + boxes = boxes.clamp(0, 1) + return boxes + + +def load_coco_and_group_by_image(json_path: str) -> Tuple[List[Dict], Dict[int, str]]: + """ + Load COCO JSON file and group annotations by image. + + Args: + json_path (str): Path to COCO JSON file. 
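+
+    Note: images with no annotations are still included in the output, paired
+    with an empty annotation list.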
+ + Returns: + Tuple containing: + - List of dicts with 'image' and 'annotations' keys + - Dict mapping category IDs to category names + """ + with open(json_path, "r") as f: + coco = json.load(f) + + images = {img["id"]: img for img in coco["images"]} + + anns_by_image = defaultdict(list) + for ann in coco["annotations"]: + anns_by_image[ann["image_id"]].append(ann) + + sorted_image_ids = sorted(images.keys()) + + grouped = [] + for image_id in sorted_image_ids: + image_info = images[image_id] + grouped.append( + {"image": image_info, "annotations": anns_by_image.get(image_id, [])} + ) + + cat_id_to_name = {cat["id"]: cat["name"] for cat in coco["categories"]} + + return grouped, cat_id_to_name + + +def ann_to_rle(segm, im_info: Dict) -> Dict: + """ + Convert annotation which can be polygons or uncompressed RLE to RLE. + + Args: + segm: Segmentation data (polygon list or RLE dict) + im_info (dict): Image info containing 'height' and 'width' + + Returns: + RLE encoded segmentation + """ + h, w = im_info["height"], im_info["width"] + + if isinstance(segm, list): + # Polygon - merge all parts into one mask RLE code + rles = mask_util.frPyObjects(segm, h, w) + rle = mask_util.merge(rles) + elif isinstance(segm["counts"], list): + # Uncompressed RLE + rle = mask_util.frPyObjects(segm, h, w) + else: + # Already RLE + rle = segm + + return rle + + +# ============================================================================ +# COCO Training API +# ============================================================================ + + +class COCO_FROM_JSON: + """ + COCO training API for loading box-only annotations from JSON. + Groups all annotations per image and creates queries per category. + """ + + def __init__( + self, + annotation_file, + prompts=None, + include_negatives=True, + category_chunk_size=None, + ): + """ + Initialize the COCO training API. + + Args: + annotation_file (str): Path to COCO JSON annotation file + prompts: Optional custom prompts for categories + include_negatives (bool): Whether to include negative examples (categories with no instances) + """ + self._raw_data, self._cat_idx_to_text = load_coco_and_group_by_image( + annotation_file + ) + self._sorted_cat_ids = sorted(list(self._cat_idx_to_text.keys())) + self.prompts = None + self.include_negatives = include_negatives + self.category_chunk_size = ( + category_chunk_size + if category_chunk_size is not None + else len(self._sorted_cat_ids) + ) + self.category_chunks = [ + self._sorted_cat_ids[i : i + self.category_chunk_size] + for i in range(0, len(self._sorted_cat_ids), self.category_chunk_size) + ] + if prompts is not None: + prompts = eval(prompts) + self.prompts = {} + for loc_dict in prompts: + self.prompts[int(loc_dict["id"])] = loc_dict["name"] + assert len(self.prompts) == len( + self._sorted_cat_ids + ), "Number of prompts must match number of categories" + + def getDatapointIds(self): + """Return all datapoint indices for training.""" + return list(range(len(self._raw_data) * len(self.category_chunks))) + + def loadQueriesAndAnnotationsFromDatapoint(self, idx): + """ + Load queries and annotations for a specific datapoint. 
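+
+        Each datapoint index encodes an (image, category chunk) pair:
+        img_idx = idx // len(self.category_chunks) picks the image and
+        chunk_idx = idx % len(self.category_chunks) picks the chunk, and one
+        query (with its grouped annotations) is emitted per category in that
+        chunk. (Descriptive summary of the code below.)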
+ + Args: + idx (int): Datapoint index + + Returns: + Tuple of (queries, annotations) lists + """ + img_idx = idx // len(self.category_chunks) + chunk_idx = idx % len(self.category_chunks) + cat_chunk = self.category_chunks[chunk_idx] + + queries = [] + annotations = [] + + query_template = { + "id": None, + "original_cat_id": None, + "object_ids_output": None, + "query_text": None, + "query_processing_order": 0, + "ptr_x_query_id": None, + "ptr_y_query_id": None, + "image_id": 0, # Single image per datapoint + "input_box": None, + "input_box_label": None, + "input_points": None, + "is_exhaustive": True, + } + + annot_template = { + "image_id": 0, + "bbox": None, # Normalized bbox in xywh + "area": None, # Unnormalized area + "segmentation": None, # RLE encoded + "object_id": None, + "is_crowd": None, + "id": None, + } + + raw_annotations = self._raw_data[img_idx]["annotations"] + image_info = self._raw_data[img_idx]["image"] + width, height = image_info["width"], image_info["height"] + + # Group annotations by category + cat_id_to_anns = defaultdict(list) + for ann in raw_annotations: + cat_id_to_anns[ann["category_id"]].append(ann) + + annotations_by_cat_sorted = [ + (cat_id, cat_id_to_anns[cat_id]) for cat_id in cat_chunk + ] + + for cat_id, anns in annotations_by_cat_sorted: + if len(anns) == 0 and not self.include_negatives: + continue + + cur_ann_ids = [] + + # Create annotations for this category + for ann in anns: + annotation = annot_template.copy() + annotation["id"] = len(annotations) + annotation["object_id"] = annotation["id"] + annotation["is_crowd"] = ann["iscrowd"] + + normalized_boxes = convert_boxlist_to_normalized_tensor( + [ann["bbox"]], width, height + ) + bbox = normalized_boxes[0] + + annotation["area"] = (bbox[2] * bbox[3]).item() + annotation["bbox"] = bbox + + if ( + "segmentation" in ann + and ann["segmentation"] is not None + and ann["segmentation"] != [] + ): + annotation["segmentation"] = ann_to_rle( + ann["segmentation"], im_info=image_info + ) + + annotations.append(annotation) + cur_ann_ids.append(annotation["id"]) + + # Create query for this category + query = query_template.copy() + query["id"] = len(queries) + query["original_cat_id"] = cat_id + query["query_text"] = ( + self._cat_idx_to_text[cat_id] + if self.prompts is None + else self.prompts[cat_id] + ) + query["object_ids_output"] = cur_ann_ids + queries.append(query) + + return queries, annotations + + def loadImagesFromDatapoint(self, idx): + """ + Load image information for a specific datapoint. + + Args: + idx (int): Datapoint index + + Returns: + List containing image info dict + """ + img_idx = idx // len(self.category_chunks) + img_data = self._raw_data[img_idx]["image"] + images = [ + { + "id": 0, + "file_name": img_data["file_name"], + "original_img_id": img_data["id"], + "coco_img_id": img_data["id"], + } + ] + return images + + +# ============================================================================ +# SAM3 Evaluation APIs +# ============================================================================ + + +class SAM3_EVAL_API_FROM_JSON_NP: + """ + SAM3 evaluation API for loading noun phrase queries from JSON. + """ + + def __init__(self, annotation_file): + """ + Initialize the SAM3 evaluation API. 
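+
+        Minimal usage sketch (illustrative; the annotation path is a placeholder):
+
+            api = SAM3_EVAL_API_FROM_JSON_NP("saco_eval_np.json")
+            for dp_id in api.getDatapointIds():
+                queries, anns = api.loadQueriesAndAnnotationsFromDatapoint(dp_id)
+                images = api.loadImagesFromDatapoint(dp_id)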
+ + Args: + annotation_file (str): Path to SAM3 JSON annotation file + """ + with open(annotation_file, "r") as f: + data = json.load(f) + self._image_data = data["images"] + + def getDatapointIds(self): + """Return all datapoint indices.""" + return list(range(len(self._image_data))) + + def loadQueriesAndAnnotationsFromDatapoint(self, idx): + """ + Load queries and annotations for a specific datapoint. + + Args: + idx (int): Datapoint index + + Returns: + Tuple of (queries, annotations) lists + """ + cur_img_data = self._image_data[idx] + queries = [] + annotations = [] + + query_template = { + "id": None, + "original_cat_id": None, + "object_ids_output": None, + "query_text": None, + "query_processing_order": 0, + "ptr_x_query_id": None, + "ptr_y_query_id": None, + "image_id": 0, + "input_box": None, + "input_box_label": None, + "input_points": None, + "is_exhaustive": True, + } + + # Create query + query = query_template.copy() + query["id"] = len(queries) + query["original_cat_id"] = int(cur_img_data["queried_category"]) + query["query_text"] = cur_img_data["text_input"] + query["object_ids_output"] = [] + queries.append(query) + + return queries, annotations + + def loadImagesFromDatapoint(self, idx): + """ + Load image information for a specific datapoint. + + Args: + idx (int): Datapoint index + + Returns: + List containing image info dict + """ + img_data = self._image_data[idx] + images = [ + { + "id": 0, + "file_name": img_data["file_name"], + "original_img_id": img_data["id"], + "coco_img_id": img_data["id"], + } + ] + return images + + +class SAM3_VEVAL_API_FROM_JSON_NP: + """ + SAM3 video evaluation API for loading noun phrase queries from JSON. + """ + + def __init__(self, annotation_file): + """ + Initialize the SAM3 video evaluation API. + + Args: + annotation_file (str): Path to SAM3 video JSON annotation file + """ + with open(annotation_file, "r") as f: + data = json.load(f) + + assert "video_np_pairs" in data, "Incorrect data format" + + self._video_data = data["videos"] + self._video_id_to_np_ids = defaultdict(list) + self._cat_id_to_np = {} + + for cat_dict in data["categories"]: + self._cat_id_to_np[cat_dict["id"]] = cat_dict["name"] + + for video_np_dict in data["video_np_pairs"]: + self._video_id_to_np_ids[video_np_dict["video_id"]].append( + video_np_dict["category_id"] + ) + assert ( + self._cat_id_to_np[video_np_dict["category_id"]] + == video_np_dict["noun_phrase"] + ), "Category name does not match text input" + + def getDatapointIds(self): + """Return all datapoint indices.""" + return list(range(len(self._video_data))) + + def loadQueriesAndAnnotationsFromDatapoint(self, idx): + """ + Load queries and annotations for a specific video datapoint. 
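+
+        One query is created per (noun phrase, frame) pair: every noun phrase
+        attached to the video is repeated for each frame, with image_id and
+        query_processing_order both set to the frame index. (Descriptive
+        summary of the loop below.)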
+ + Args: + idx (int): Datapoint index + + Returns: + Tuple of (queries, annotations) lists + """ + cur_vid_data = self._video_data[idx] + queries = [] + annotations = [] + + query_template = { + "id": None, + "original_cat_id": None, + "object_ids_output": None, + "query_text": None, + "query_processing_order": 0, + "ptr_x_query_id": None, + "ptr_y_query_id": None, + "image_id": 0, + "input_box": None, + "input_box_label": None, + "input_points": None, + "is_exhaustive": True, + } + + all_np_ids = self._video_id_to_np_ids[cur_vid_data["id"]] + + for np_id in all_np_ids: + text_input = self._cat_id_to_np[np_id] + + for i, image_path in enumerate(cur_vid_data["file_names"]): + query = query_template.copy() + query["id"] = len(queries) + query["original_cat_id"] = np_id + query["query_text"] = text_input + query["image_id"] = i + query["query_processing_order"] = i + query["object_ids_output"] = [] + queries.append(query) + + return queries, annotations + + def loadImagesFromDatapoint(self, idx): + """ + Load image information for a specific video datapoint. + + Args: + idx (int): Datapoint index + + Returns: + List containing image info dicts for all frames + """ + video_data = self._video_data[idx] + images = [ + { + "id": i, + "file_name": file_name, + "original_img_id": video_data["id"], + "coco_img_id": video_data["id"], + } + for i, file_name in enumerate(video_data["file_names"]) + ] + return images diff --git a/detect_tools/sam3/sam3/train/data/collator.py b/detect_tools/sam3/sam3/train/data/collator.py new file mode 100644 index 0000000000000000000000000000000000000000..e0f7e2e265a36b4a08bffeecc28df8139167f2d4 --- /dev/null +++ b/detect_tools/sam3/sam3/train/data/collator.py @@ -0,0 +1,360 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +from dataclasses import dataclass, field as field_ptr_behaviour, fields, is_dataclass +from typing import Any, get_args, get_origin, List, Union + +import torch + +from sam3.model.data_misc import ( + BatchedDatapoint, + BatchedFindTarget, + BatchedInferenceMetadata, + FindStage, +) + +from .sam3_image_dataset import Datapoint + + +MyTensor = Union[torch.Tensor, List[Any]] + + +def convert_my_tensors(obj): + def is_optional_field(field) -> bool: + return get_origin(field) is Union and type(None) in get_args(field) + + for field in fields(obj): + if is_dataclass(getattr(obj, field.name)): + convert_my_tensors(getattr(obj, field.name)) + continue + + field_type = field.type + if is_optional_field(field.type): + field_type = Union[get_args(field.type)[:-1]] # Get the Optional field type + + if field_type != MyTensor or getattr(obj, field.name) is None: + continue + + elif len(getattr(obj, field.name)) and isinstance( + getattr(obj, field.name)[0], torch.Tensor + ): + stack_dim = 0 + if field.name in [ + "input_boxes", + "input_boxes_label", + ]: + stack_dim = 1 + setattr( + obj, + field.name, + torch.stack(getattr(obj, field.name), dim=stack_dim).to( + getattr(obj, field.name + "__type") + ), + ) + else: + setattr( + obj, + field.name, + torch.as_tensor( + getattr(obj, field.name), dtype=getattr(obj, field.name + "__type") + ), + ) + return obj + + +def packed_to_padded_naive(boxes_packed, num_boxes, fill_value=0): + """ + Convert a packed tensor of bounding boxes to a padded tensor of bounding + boxes. Naive implementation using a loop. + + Inputs: + - boxes_packed: Tensor of shape (N_1 + ... 
+ N_B, 4) + - num_boxes: Tensor of shape (B,) where num_boxes[i] = N_i + + Returns: + - boxes_padded: Tensor of shape (B, N_max, 4) where N_max = max_i N_i + """ + B = num_boxes.shape[0] + Ns = num_boxes.tolist() + + boxes_padded = boxes_packed.new_zeros(B, max(Ns), *boxes_packed.shape[1:]) + if fill_value != 0: + boxes_padded[...] = fill_value + prev_idx = 0 + for i in range(B): + next_idx = prev_idx + Ns[i] + boxes_padded[i, : Ns[i]] = boxes_packed[prev_idx:next_idx] + prev_idx = next_idx + return boxes_padded + + +def pad_tensor_list_to_longest( + tensors: List[torch.Tensor], dim=0, pad_val=0 +) -> List[torch.Tensor]: + # Edits the list in-place + if not tensors: + return tensors + pad_len = max(t.shape[dim] for t in tensors) + for i in range(len(tensors)): + n_dims = len(tensors[i].shape) + n_right_dims = (n_dims - 1) - (n_dims + dim) % n_dims + n_pad = pad_len - tensors[i].shape[dim] + pad_tuple = tuple([0] * 2 * n_right_dims + [0, n_pad]) + tensors[i] = torch.nn.functional.pad(tensors[i], pad_tuple, value=pad_val) + return tensors + + +def collate_fn_api_with_chunking( + batch, + num_chunks, + dict_key, + with_seg_masks=False, + input_points_embedding_dim=257, + repeats: int = 0, + load_image_in_fp16: bool = False, +): + assert num_chunks >= 1, "num_chunks must be >= 1" + + # split the batch into num_chunks chunks + batch_chunks = [batch[i::num_chunks] for i in range(num_chunks)] + + # collate each chunk + collated_chunks = [ + collate_fn_api( + chunk, + dict_key, + with_seg_masks, + input_points_embedding_dim, + repeats, + # ptr_behaviour, + load_image_in_fp16, + ) + for chunk in batch_chunks + ] + return collated_chunks + + +def collate_fn_api( + batch: List[Datapoint], + dict_key, + with_seg_masks=False, + input_points_embedding_dim=257, + repeats: int = 0, + load_image_in_fp16: bool = False, +): + # img_batch = torch.stack(sum([[img.data for img in v.images] for v in batch], [])) + img_batch = [] + text_batch = [] + raw_images = None + + num_stages = ( + max(q.query_processing_order for data in batch for q in data.find_queries) + 1 + ) + + stages = [ + FindStage( + img_ids=[], + text_ids=[], + input_boxes=[], + input_boxes_label=[], + input_boxes_mask=[], + input_points=[], + input_points_mask=[], + object_ids=[], + ) + for _ in range(num_stages) + ] + find_targets = [ + BatchedFindTarget( + num_boxes=[], + boxes=[], + boxes_padded=[], + is_exhaustive=[], + segments=[], + semantic_segments=[], + is_valid_segment=[], + repeated_boxes=[], + object_ids=[], + object_ids_padded=[], + ) + for _ in range(num_stages) + ] + find_metadatas = [ + BatchedInferenceMetadata( + coco_image_id=[], + original_size=[], + object_id=[], + frame_index=[], + original_image_id=[], + original_category_id=[], + is_conditioning_only=[], + ) + for _ in range(num_stages) + ] + + offset_img_id = 0 + offset_query_id = [0 for _ in range(num_stages)] + for i, data in enumerate(batch): + img_batch.extend([img.data for img in data.images]) + + if data.raw_images is not None: + if raw_images is None: + raw_images = [] + raw_images.extend(data.raw_images) + + # Conversion of query_ids indexing in a datapoint to query_ids indexing in a stage + datapoint_query_id_2_stage_query_id = [] + for q in data.find_queries: + stage_id = q.query_processing_order + datapoint_query_id_2_stage_query_id.append(offset_query_id[stage_id]) + offset_query_id[stage_id] += 1 + + for j, q in enumerate(data.find_queries): + stage_id = q.query_processing_order + stages[stage_id].img_ids.append(q.image_id + offset_img_id) + if q.query_text 
not in text_batch: + text_batch.append(q.query_text) + stages[stage_id].text_ids.append(text_batch.index(q.query_text)) + + assert ( + q.inference_metadata is not None + ), "inference_metadata must be provided when FindQueryLoaded is created." + for f in fields(q.inference_metadata): + getattr(find_metadatas[stage_id], f.name).append( + getattr(q.inference_metadata, f.name) + ) + + if q.input_bbox is not None: + assert q.input_bbox.numel() % 4 == 0 + assert q.input_bbox_label is not None + nb_boxes = q.input_bbox.numel() // 4 + assert len(q.input_bbox_label) == nb_boxes + stages[stage_id].input_boxes.append(q.input_bbox.view(nb_boxes, 4)) + stages[stage_id].input_boxes_label.append( + q.input_bbox_label.view(nb_boxes) + ) + stages[stage_id].input_boxes_mask.append( + torch.zeros(nb_boxes, dtype=torch.bool) + ) + else: + stages[stage_id].input_boxes.append(torch.zeros(0, 4)) + stages[stage_id].input_boxes_label.append( + torch.zeros(0, dtype=torch.bool) + ) + stages[stage_id].input_boxes_mask.append( + torch.ones(0, dtype=torch.bool) + ) + + if q.input_points is not None: + stages[stage_id].input_points.append( + q.input_points.squeeze(0) # Strip a trivial batch index + ) + # All masks will be padded up to the longest length + # with 1s before final conversion to batchd tensors + stages[stage_id].input_points_mask.append( + torch.zeros(q.input_points.shape[1]) + ) + else: + stages[stage_id].input_points.append( + torch.empty(0, input_points_embedding_dim) + ) + stages[stage_id].input_points_mask.append(torch.empty(0)) + + current_out_boxes = [] + current_out_object_ids = [] + # Set the object ids referred to by this query + stages[stage_id].object_ids.append(q.object_ids_output) + for object_id in q.object_ids_output: + current_out_boxes.append( + data.images[q.image_id].objects[object_id].bbox + ) + current_out_object_ids.append(object_id) + find_targets[stage_id].boxes.extend(current_out_boxes) + find_targets[stage_id].object_ids.extend(current_out_object_ids) + if repeats > 0: + for _ in range(repeats): + find_targets[stage_id].repeated_boxes.extend(current_out_boxes) + find_targets[stage_id].num_boxes.append(len(current_out_boxes)) + find_targets[stage_id].is_exhaustive.append(q.is_exhaustive) + + if with_seg_masks: + current_seg_mask = [] + current_is_valid_segment = [] + for object_id in q.object_ids_output: + seg_mask = data.images[q.image_id].objects[object_id].segment + if seg_mask is not None: + current_seg_mask.append(seg_mask) + current_is_valid_segment.append(1) + else: + dummy_mask = torch.zeros( + data.images[q.image_id].data.shape[-2:], dtype=torch.bool + ) + current_seg_mask.append(dummy_mask) + current_is_valid_segment.append(0) + find_targets[stage_id].segments.extend(current_seg_mask) + find_targets[stage_id].is_valid_segment.extend(current_is_valid_segment) + else: + # We are not loading segmentation masks + find_targets[stage_id].segments = None + find_targets[stage_id].is_valid_segment = None + + if q.semantic_target is not None: + find_targets[stage_id].semantic_segments.append(q.semantic_target) + + offset_img_id += len(data.images) + + # Pad input points to equal sequence lengths + for i in range(len(stages)): + stages[i].input_points = pad_tensor_list_to_longest( + stages[i].input_points, dim=0, pad_val=0 + ) + # Masked-out regions indicated by 1s. 
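+        # Hedged illustration (hypothetical shapes, not part of the collator): with two
+        # queries whose point prompts have 2 and 3 entries, pad_tensor_list_to_longest
+        # pads both point tensors to length 3 with zeros, while the matching masks are
+        # padded with 1s so the padded slots are ignored downstream, e.g.
+        #   pts   = [torch.zeros(2, 257), torch.zeros(3, 257)]
+        #   masks = [torch.zeros(2), torch.zeros(3)]
+        #   pts   = pad_tensor_list_to_longest(pts, dim=0, pad_val=0)    # both become (3, 257)
+        #   masks = pad_tensor_list_to_longest(masks, dim=0, pad_val=1)  # short one -> [0., 0., 1.]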
+ stages[i].input_points_mask = pad_tensor_list_to_longest( + stages[i].input_points_mask, dim=0, pad_val=1 + ) + + # Pad input boxes to equal sequence lengths + for i in range(len(stages)): + stages[i].input_boxes = pad_tensor_list_to_longest( + stages[i].input_boxes, dim=0, pad_val=0 + ) + stages[i].input_boxes_label = pad_tensor_list_to_longest( + stages[i].input_boxes_label, dim=0, pad_val=0 + ) + # Masked-out regions indicated by 1s. + stages[i].input_boxes_mask = pad_tensor_list_to_longest( + stages[i].input_boxes_mask, dim=0, pad_val=1 + ) + + # Convert to tensors + for i in range(len(stages)): + stages[i] = convert_my_tensors(stages[i]) + find_targets[i] = convert_my_tensors(find_targets[i]) + find_metadatas[i] = convert_my_tensors(find_metadatas[i]) + # get padded representation for the boxes + find_targets[i].boxes_padded = packed_to_padded_naive( + find_targets[i].boxes.view(-1, 4), find_targets[i].num_boxes + ) + find_targets[i].object_ids_padded = packed_to_padded_naive( + find_targets[i].object_ids, find_targets[i].num_boxes, fill_value=-1 + ) + + # Finalize the image batch + # check sizes + for img in img_batch[1:]: + assert img.shape == img_batch[0].shape, "All images must have the same size" + image_batch = torch.stack(img_batch) + if load_image_in_fp16: + # Optionally, cast the image tensors to fp16, which helps save GPU memory on + # long videos with thousands of frames (where image tensors could be several GBs) + image_batch = image_batch.half() + + return { + dict_key: BatchedDatapoint( + img_batch=image_batch, + find_text_batch=text_batch, + find_inputs=stages, + find_targets=find_targets, + find_metadatas=find_metadatas, + raw_images=raw_images, + ) + } diff --git a/detect_tools/sam3/sam3/train/data/sam3_image_dataset.py b/detect_tools/sam3/sam3/train/data/sam3_image_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..97efb1d15bb62a7a8d2b04df1e5edaeb958aa235 --- /dev/null +++ b/detect_tools/sam3/sam3/train/data/sam3_image_dataset.py @@ -0,0 +1,528 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
All Rights Reserved + +"""Dataset class for modulated detection""" + +import json +import os +import random +import sys +import traceback +from collections import Counter +from dataclasses import dataclass +from enum import Enum +from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union + +import torch +import torch.utils.data +import torchvision +from decord import cpu, VideoReader +from iopath.common.file_io import g_pathmgr + +from PIL import Image as PILImage +from PIL.Image import DecompressionBombError + +from sam3.model.box_ops import box_xywh_to_xyxy +from torchvision.datasets.vision import VisionDataset + +from .coco_json_loaders import COCO_FROM_JSON + + +@dataclass +class InferenceMetadata: + """Metadata required for postprocessing""" + + # Coco id that corresponds to the "image" for evaluation by the coco evaluator + # This is used for our own "class agnostic" evaluation + coco_image_id: int + + # id in the original dataset, such that we can use the original evaluator + original_image_id: int + + # Original category id (if we want to use the original evaluator) + original_category_id: int + + # Size of the raw image (height, width) + original_size: Tuple[int, int] + + # Id of the object in the media + object_id: int + + # Index of the frame in the media (0 if single image) + frame_index: int + + # Whether it is for conditioning only, e.g., 0-th frame in TA is for conditioning + # as we assume GT available in frame 0. + is_conditioning_only: Optional[bool] = False + + +@dataclass +class FindQuery: + query_text: str + + image_id: int + + # In case of a find query, the list of object ids that have to be predicted + object_ids_output: List[int] + + # This is "instance exhaustivity". + # true iff all instances are separable and annotated + # See below the slightly different "pixel exhaustivity" + is_exhaustive: bool + + # The order in which the queries are processed (only meaningful for video) + query_processing_order: int = 0 + + # Input geometry, initially in denormalized XYXY format. Then + # 1. 
converted to normalized CxCyWH by the Normalize transform + input_bbox: Optional[torch.Tensor] = None + input_bbox_label: Optional[torch.Tensor] = None + + # Only for the PVS task + input_points: Optional[torch.Tensor] = None + + semantic_target: Optional[torch.Tensor] = None + + # pixel exhaustivity: true iff the union of all segments (including crowds) + # covers every pixel belonging to the target class + # Note that instance_exhaustive implies pixel_exhaustive + is_pixel_exhaustive: Optional[bool] = None + + +@dataclass +class FindQueryLoaded(FindQuery): + # Must have default value since FindQuery has entries with default values + inference_metadata: Optional[InferenceMetadata] = None + + +@dataclass +class Object: + # Initially in denormalized XYXY format, gets converted to normalized CxCyWH by the Normalize transform + bbox: torch.Tensor + area: float + + # Id of the object in the media + object_id: Optional[int] = -1 + + # Index of the frame in the media (0 if single image) + frame_index: Optional[int] = -1 + + segment: Optional[Union[torch.Tensor, dict]] = None # RLE dict or binary mask + + is_crowd: bool = False + + source: Optional[str] = None + + +@dataclass +class Image: + data: Union[torch.Tensor, PILImage.Image] + objects: List[Object] + size: Tuple[int, int] # (height, width) + + # For blurring augmentation + blurring_mask: Optional[Dict[str, Any]] = None + + +@dataclass +class Datapoint: + """Refers to an image/video and all its annotations""" + + find_queries: List[FindQueryLoaded] + images: List[Image] + raw_images: Optional[List[PILImage.Image]] = None + + +class CustomCocoDetectionAPI(VisionDataset): + """`MS Coco Detection `_ Dataset. + + Args: + root (string): Root directory where images are downloaded to. + annFile (string): Path to json annotation file. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.ToTensor`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. 
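+        load_segmentation (bool): If True, the raw "segmentation" entries (e.g. RLE dicts)
+            are attached to the loaded objects; they are decoded lazily by a later transform.
+        fix_fname (bool, optional): If True, only the basename of each ``file_name`` is kept.
+        limit_ids (int, optional): If set, the datapoint ids are randomly subsampled to at
+            most this many entries.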
+ """ + + def __init__( + self, + root: str, + annFile: str, + load_segmentation: bool, + fix_fname: bool = False, + training: bool = True, + blurring_masks_path: Optional[str] = None, + use_caching: bool = True, + zstd_dict_path=None, + filter_query=None, + coco_json_loader: Callable = COCO_FROM_JSON, + limit_ids: int = None, + ) -> None: + super().__init__(root) + + self.annFile = annFile + self.use_caching = use_caching + self.zstd_dict_path = zstd_dict_path + + self.curr_epoch = 0 # Used in case data loader behavior changes across epochs + self.load_segmentation = load_segmentation + self.fix_fname = fix_fname + self.filter_query = filter_query + + self.coco = None + self.coco_json_loader = coco_json_loader + self.limit_ids = limit_ids + self.set_sharded_annotation_file(0) + self.training = training + self.blurring_masks_path = blurring_masks_path + + def _load_images( + self, datapoint_id: int, img_ids_to_load: Optional[Set[int]] = None + ) -> Tuple[List[Tuple[int, PILImage.Image]], List[Dict[str, Any]]]: + all_images = [] + all_img_metadata = [] + for current_meta in self.coco.loadImagesFromDatapoint(datapoint_id): + img_id = current_meta["id"] + if img_ids_to_load is not None and img_id not in img_ids_to_load: + continue + if self.fix_fname: + current_meta["file_name"] = current_meta["file_name"].split("/")[-1] + path = current_meta["file_name"] + if self.blurring_masks_path is not None: + mask_fname = os.path.basename(path).replace(".jpg", "-mask.json") + mask_path = os.path.join(self.blurring_masks_path, mask_fname) + if os.path.exists(mask_path): + with open(mask_path, "r") as fopen: + current_meta["blurring_mask"] = json.load(fopen) + + all_img_metadata.append(current_meta) + path = os.path.join(self.root, path) + try: + if ".mp4" in path and path[-4:] == ".mp4": + # Going to load a video frame + video_path, frame = path.split("@") + video = VideoReader(video_path, ctx=cpu(0)) + # Convert to PIL image + all_images.append( + ( + img_id, + torchvision.transforms.ToPILImage()( + video[int(frame)].asnumpy() + ), + ) + ) + else: + with g_pathmgr.open(path, "rb") as fopen: + all_images.append((img_id, PILImage.open(fopen).convert("RGB"))) + except FileNotFoundError as e: + print(f"File not found: {path} from dataset: {self.annFile}") + raise e + + return all_images, all_img_metadata + + def set_curr_epoch(self, epoch: int): + self.curr_epoch = epoch + + def set_epoch(self, epoch: int): + pass + + def set_sharded_annotation_file(self, data_epoch: int): + if self.coco is not None: + return + + assert g_pathmgr.isfile( + self.annFile + ), f"please provide valid annotation file. 
Missing: {self.annFile}" + annFile = g_pathmgr.get_local_path(self.annFile) + + if self.coco is not None: + del self.coco + + self.coco = self.coco_json_loader(annFile) + # Use a torch tensor here to optimize memory usage when using several dataloaders + ids_list = list(sorted(self.coco.getDatapointIds())) + if self.limit_ids is not None: + local_random = random.Random(len(ids_list)) + local_random.shuffle(ids_list) + ids_list = ids_list[: self.limit_ids] + self.ids = torch.as_tensor(ids_list, dtype=torch.long) + + def __getitem__(self, index: int) -> Datapoint: + return self._load_datapoint(index) + + def _load_datapoint(self, index: int) -> Datapoint: + """A separate method for easy overriding in subclasses.""" + id = self.ids[index].item() + pil_images, img_metadata = self._load_images(id) + queries, annotations = self.coco.loadQueriesAndAnnotationsFromDatapoint(id) + return self.load_queries(pil_images, annotations, queries, img_metadata) + + def load_queries(self, pil_images, annotations, queries, img_metadata): + """Transform the raw image and queries into a Datapoint sample.""" + images: List[Image] = [] + id2index_img = {} + id2index_obj = {} + id2index_find_query = {} + id2imsize = {} + assert len(pil_images) == len(img_metadata) + for i in range(len(pil_images)): + w, h = pil_images[i][1].size + blurring_mask = None + if "blurring_mask" in img_metadata[i]: + blurring_mask = img_metadata[i]["blurring_mask"] + images.append( + Image( + data=pil_images[i][1], + objects=[], + size=(h, w), + blurring_mask=blurring_mask, + ) + ) + id2index_img[pil_images[i][0]] = i + id2imsize[pil_images[i][0]] = (h, w) + + for annotation in annotations: + image_id = id2index_img[annotation["image_id"]] + bbox = box_xywh_to_xyxy(torch.as_tensor(annotation["bbox"])).view(1, 4) + h, w = id2imsize[annotation["image_id"]] + bbox[:, 0::2].mul_(w).clamp_(min=0, max=w) + bbox[:, 1::2].mul_(h).clamp_(min=0, max=h) + segment = None + if self.load_segmentation and "segmentation" in annotation: + # We're not decoding the RLE here, a transform will do it lazily later + segment = annotation["segmentation"] + images[image_id].objects.append( + Object( + bbox=bbox[0], + area=annotation["area"], + object_id=( + annotation["object_id"] if "object_id" in annotation else -1 + ), + frame_index=( + annotation["frame_index"] if "frame_index" in annotation else -1 + ), + segment=segment, + is_crowd=( + annotation["is_crowd"] if "is_crowd" in annotation else None + ), + source=annotation["source"] if "source" in annotation else "", + ) + ) + id2index_obj[annotation["id"]] = len(images[image_id].objects) - 1 + + find_queries = [] + stage2num_queries = Counter() + for i, query in enumerate(queries): + stage2num_queries[query["query_processing_order"]] += 1 + id2index_find_query[query["id"]] = i + + # Sanity check: all the stages should have the same number of queries + if len(stage2num_queries) == 0: + num_queries_per_stage = 0 + else: + num_queries_per_stage = stage2num_queries.most_common(1)[0][1] + for stage, num_queries in stage2num_queries.items(): + assert ( + num_queries == num_queries_per_stage + ), f"Number of queries in stage {stage} is {num_queries}, expected {num_queries_per_stage}" + + for query_id, query in enumerate(queries): + h, w = id2imsize[query["image_id"]] + if ( + "input_box" in query + and query["input_box"] is not None + and len(query["input_box"]) > 0 + ): + bbox = box_xywh_to_xyxy(torch.as_tensor(query["input_box"])).view(-1, 4) + bbox[:, 0::2].mul_(w).clamp_(min=0, max=w) + bbox[:, 
1::2].mul_(h).clamp_(min=0, max=h) + if "input_box_label" in query and query["input_box_label"] is not None: + bbox_label = torch.as_tensor( + query["input_box_label"], dtype=torch.long + ).view(-1) + assert len(bbox_label) == len(bbox) + else: + # assume the boxes are positives + bbox_label = torch.ones(len(bbox), dtype=torch.long) + else: + bbox = None + bbox_label = None + + if "input_points" in query and query["input_points"] is not None: + points = torch.as_tensor(query["input_points"]).view(1, -1, 3) + points[:, :, 0:1].mul_(w).clamp_(min=0, max=w) + points[:, :, 1:2].mul_(h).clamp_(min=0, max=h) + else: + points = None + + try: + original_image_id = int( + img_metadata[id2index_img[query["image_id"]]]["original_img_id"] + ) + except ValueError: + original_image_id = -1 + + try: + img_metadata_query = img_metadata[id2index_img[query["image_id"]]] + coco_image_id = ( + int(img_metadata_query["coco_img_id"]) + if "coco_img_id" in img_metadata_query + else query["id"] + ) + except KeyError: + coco_image_id = -1 + + try: + original_category_id = int(query["original_cat_id"]) + except (ValueError, KeyError): + original_category_id = -1 + + # For evaluation, we associate the ids of the object to be tracked to the query + if query["object_ids_output"]: + obj_id = query["object_ids_output"][0] + obj_idx = id2index_obj[obj_id] + image_idx = id2index_img[query["image_id"]] + object_id = images[image_idx].objects[obj_idx].object_id + frame_index = images[image_idx].objects[obj_idx].frame_index + else: + object_id = -1 + frame_index = -1 + + find_queries.append( + FindQueryLoaded( + # id=query["id"], + # query_type=qtype, + query_text=( + query["query_text"] if query["query_text"] is not None else "" + ), + image_id=id2index_img[query["image_id"]], + input_bbox=bbox, + input_bbox_label=bbox_label, + input_points=points, + object_ids_output=[ + id2index_obj[obj_id] for obj_id in query["object_ids_output"] + ], + is_exhaustive=query["is_exhaustive"], + is_pixel_exhaustive=( + query["is_pixel_exhaustive"] + if "is_pixel_exhaustive" in query + else ( + query["is_exhaustive"] if query["is_exhaustive"] else None + ) + ), + query_processing_order=query["query_processing_order"], + inference_metadata=InferenceMetadata( + coco_image_id=-1 if self.training else coco_image_id, + original_image_id=(-1 if self.training else original_image_id), + frame_index=frame_index, + original_category_id=original_category_id, + original_size=(h, w), + object_id=object_id, + ), + ) + ) + + return Datapoint( + find_queries=find_queries, + images=images, + raw_images=[p[1] for p in pil_images], + ) + + def __len__(self) -> int: + return len(self.ids) + + +class Sam3ImageDataset(CustomCocoDetectionAPI): + def __init__( + self, + img_folder, + ann_file, + transforms, + max_ann_per_img: int, + multiplier: int, + training: bool, + load_segmentation: bool = False, + max_train_queries: int = 81, + max_val_queries: int = 300, + fix_fname: bool = False, + is_sharded_annotation_dir: bool = False, + blurring_masks_path: Optional[str] = None, + use_caching: bool = True, + zstd_dict_path=None, + filter_query=None, + coco_json_loader: Callable = COCO_FROM_JSON, + limit_ids: int = None, + ): + super(Sam3ImageDataset, self).__init__( + img_folder, + ann_file, + fix_fname=fix_fname, + load_segmentation=load_segmentation, + training=training, + blurring_masks_path=blurring_masks_path, + use_caching=use_caching, + zstd_dict_path=zstd_dict_path, + filter_query=filter_query, + coco_json_loader=coco_json_loader, + limit_ids=limit_ids, + ) + + 
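+        # Hedged usage sketch (hypothetical paths and minimal arguments, not part of this
+        # file): a dataset is typically built from an image folder, a COCO-style json and
+        # a list of datapoint transforms, e.g.
+        #   ds = Sam3ImageDataset(
+        #       img_folder="/data/images",            # hypothetical path
+        #       ann_file="/data/annotations.json",    # hypothetical path
+        #       transforms=[],
+        #       max_ann_per_img=100,
+        #       multiplier=1,
+        #       training=True,
+        #   )
+        #   datapoint = ds[0]  # a Datapoint with .images and .find_queries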
self._transforms = transforms + self.training = training + self.max_ann_per_img = max_ann_per_img + self.max_train_queries = max_train_queries + self.max_val_queries = max_val_queries + + self.repeat_factors = torch.ones(len(self.ids), dtype=torch.float32) + + self.repeat_factors *= multiplier + print(f"Raw dataset length = {len(self.ids)}") + + self._MAX_RETRIES = 100 + + def __getitem__(self, idx): + return self.__orig_getitem__(idx) + + def __orig_getitem__(self, idx): + for _ in range(self._MAX_RETRIES): + try: + datapoint = super(Sam3ImageDataset, self).__getitem__(idx) + + # This can be done better by filtering the offending find queries + # However, this requires care: + # - Delete any find/get query that may depend on the deleted one + # - Re-compute the indexes in the pointers to account for the deleted finds + for q in datapoint.find_queries: + if len(q.object_ids_output) > self.max_ann_per_img: + raise DecompressionBombError( + f"Too many outputs ({len(q.object_ids_output)})" + ) + + max_queries = ( + self.max_train_queries if self.training else self.max_val_queries + ) + + if len(datapoint.find_queries) > max_queries: + raise DecompressionBombError( + f"Too many find queries ({len(datapoint.find_queries)})" + ) + + if len(datapoint.find_queries) == 0: + raise DecompressionBombError("No find queries") + for transform in self._transforms: + datapoint = transform(datapoint, epoch=self.curr_epoch) + + break + except (DecompressionBombError, OSError, ValueError) as error: + sys.stderr.write(f"ERROR: got loading error on datapoint {idx}\n") + sys.stderr.write(f"Exception: {error}\n") + sys.stderr.write(traceback.format_exc()) + idx = (idx + 1) % len(self) + else: + raise RuntimeError( + f"Failed {self._MAX_RETRIES} times trying to load an image." + ) + + return datapoint diff --git a/detect_tools/sam3/sam3/train/data/sam3_video_dataset.py b/detect_tools/sam3/sam3/train/data/sam3_video_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..8ee8a4f82f1f37e598626ecde31742df9776ea12 --- /dev/null +++ b/detect_tools/sam3/sam3/train/data/sam3_video_dataset.py @@ -0,0 +1,327 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import copy + +import io +import json +import logging +import math +import os +import pickle +import random +import sys +from typing import Any, Dict, List, Optional, Set, Tuple, Union + +import torch +import torchvision + +# from decord import cpu, VideoReader + +from iopath.common.file_io import PathManager +from PIL import Image as PILImage + +from .sam3_image_dataset import Datapoint, Sam3ImageDataset + + +SEED = 42 + + +class VideoGroundingDataset(Sam3ImageDataset): + def __init__( + self, + num_stages_sample: int = 4, + stage_stride_min: int = 1, + stage_stride_max: int = 5, + random_reverse_time_axis: bool = True, + is_tiling_single_image: bool = False, + # By default, we remove find those queries with geometric inputs (input_box or input_points) + # when creating synthetic videos from frames (since they are not *video-level* text prompts). + # If we need them later, we can sample them on-the-fly via transforms or inside the model. 
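+        # (For reference: when is_tiling_single_image=True, _tile_single_image_data below
+        #  deep-copies the single frame num_stages_sample times and re-indexes each copied
+        #  query's image_id / query_processing_order to the tiled frame index.)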
+ tile_img_keep_find_queries_with_geo_inputs: bool = False, + tile_img_keep_get_queries: bool = False, + # the maximum number of find queries (for each frame) to keep in a video; if the datapoint + # contains more queries per frame than this limit, we subsample them to avoid OOM errors + max_query_num: int = -1, # the default -1 means no limit + # whether to override the "is_exhaustive" flag of the loaded find queries to True + # (by default, our video datasets are ingested with is_exhaustive=False, since the YTVIS format + # annotations doesn't involve an "is_exhaustive" flag; this means that those unmatched (negative) + # detection queries or tracking queries do not receive a classification loss given that we have + # weak_loss=True in IABCEMdetr -- this could lead to false positives for both image detection + # and video association.) + override_query_is_exhaustive_to_true: bool = False, + # the maximum number of masklets in a video; if the datapoint contains more masklets + # than this limit, we skip the datapoint to avoid OOM errors (this is useful for + # training with large videos that contain many objects) + max_masklet_num_in_video: int = 300, # 300 masklets is usually OK to avoid OOM + **kwargs, + ): + """ + Loading video grounding data + + Video frame sampling parameters (for training only): + - num_stages_sample: number of frames to sample from the video during training + - stage_stride_min: minimum stride between sampled frames during training + - stage_stride_max: maximum stride between sampled frames during training (if it's + greater than stage_stride_min, the actual stride is sampled uniformly between min + and max; during inference, we always use all frames in the video with stride=1) + - random_reverse_time_axis: whether to randomly invert the video's temporal axis + (i.e. 
playing it backwards) during training + """ + super().__init__(**kwargs) + assert num_stages_sample >= 1 + assert stage_stride_min >= 1 + assert stage_stride_max >= stage_stride_min + self.num_stages_sample = num_stages_sample + self.stage_stride_min = stage_stride_min + self.stage_stride_max = stage_stride_max + self.random_reverse_time_axis = random_reverse_time_axis + self.is_tiling_single_image = is_tiling_single_image + self.tile_img_keep_find_queries_with_geo_inputs = ( + tile_img_keep_find_queries_with_geo_inputs + ) + self.tile_img_keep_get_queries = tile_img_keep_get_queries + self.max_query_num = max_query_num + self.override_query_is_exhaustive_to_true = override_query_is_exhaustive_to_true + self.max_masklet_num_in_video = max_masklet_num_in_video + self.rng = random.Random() + self.set_curr_epoch(0) + + def set_curr_epoch(self, epoch: int): + super().set_curr_epoch(epoch) + self.rng.seed(SEED + epoch) + + def _load_datapoint(self, index: int) -> Datapoint: + id = self.ids[index].item() + queries, annotations = self.coco.loadQueriesAndAnnotationsFromDatapoint(id) + + # we subsample the video frames during training + if self.training and not self.is_tiling_single_image: + # pick a random stride for sampling query stages (`randint` includes both ends) + stage_stride = self.rng.randint( + self.stage_stride_min, self.stage_stride_max + ) + stage_ids_to_keep = self._sample_stage_ids( + queries, self.num_stages_sample, stage_stride + ) + # filter the queries and annotations to keep only the selected stages + # (also remap the stage ids so that they are contiguous and start from 0) + reverse_time_axis = ( + self.rng.random() < 0.5 if self.random_reverse_time_axis else False + ) + queries, annotations, kept_img_ids = self._filter_query_and_anns( + queries, + annotations, + stage_ids_to_keep, + remap_stage_id=True, + reverse_time_axis=reverse_time_axis, + ) + pil_images, img_metadata = self._load_images(id, kept_img_ids) + if reverse_time_axis: + # reverse the temporal ordering of the images and their metadata + # so that the image order matches the query order + pil_images = pil_images[::-1] + img_metadata = img_metadata[::-1] + else: + pil_images, img_metadata = self._load_images(id) + + # check that all the images have the same image size (they are expected + # to have the same image size since they are frames from the same video) + assert all(p.size == pil_images[0][1].size for _, p in pil_images) + + queries.sort(key=lambda q: q["query_processing_order"]) + if self.override_query_is_exhaustive_to_true: + for query in queries: + query["is_exhaustive"] = True + datapoint = self.load_queries(pil_images, annotations, queries, img_metadata) + + # skip datapoints with too many masklets to avoid OOM errors + num_masklets_in_video = len(datapoint.images[0].objects) + if num_masklets_in_video > self.max_masklet_num_in_video > 0: + logging.warning( + f"Datapoint {id} has ({num_masklets_in_video=}), exceeding " + f"the maximum allowed ({self.max_masklet_num_in_video}). " + "Skipping this datapoint." 
+ ) + next_index = (index + 1) % len(self) + return self._load_datapoint(next_index) # move to the next datapoint + + if self.is_tiling_single_image: + datapoint = self._tile_single_image_data(datapoint, self.num_stages_sample) + if self.max_query_num > 0: + datapoint = self._subsample_queries(datapoint, self.max_query_num) + + # ensure that all find queries have the same processing order as their image id + for query in datapoint.find_queries: + assert query.image_id == query.query_processing_order, ( + f"find query has inconsistent image_id and " + f"query_processing_order: {query.image_id=} vs " + f"{query.query_processing_order=}" + ) + return datapoint + + def _sample_stage_ids(self, queries, num_stages_sample, stage_stride): + """Sample a subset of stage ids from all queries.""" + # Later we can perhaps turn it into a Sampler class to be more flexible. + all_stage_ids = sorted(set(q["query_processing_order"] for q in queries)) + num_stages_total = len(all_stage_ids) + if num_stages_total < num_stages_sample: + raise ValueError("Not enough stages to sample") + + # the difference in index between the first and the last sampled stage ids + b_e_gap = (num_stages_sample - 1) * stage_stride + if b_e_gap > num_stages_total - 1: + # In this case, it's not possible to sample with the provide stride, + # so we use the maximum possible stride. + prev_stage_stride = stage_stride + stage_stride = math.floor((num_stages_total - 1) / (num_stages_sample - 1)) + logging.info( + f"lowering stride from {prev_stage_stride} to {stage_stride} to " + f"sample {num_stages_sample} stages (from {num_stages_total} total)" + ) + b_e_gap = (num_stages_sample - 1) * stage_stride + + # randomly select a starting stage id (`randint` includes both ends) + b_max = len(all_stage_ids) - 1 - b_e_gap + b = self.rng.randint(0, b_max) + e = b + b_e_gap + stage_ids_to_keep = all_stage_ids[b : e + 1 : stage_stride] + return stage_ids_to_keep + + def _filter_query_and_anns( + self, queries, annotations, stage_ids_to_keep, remap_stage_id, reverse_time_axis + ): + """Filter queries and annotations to only keep those in `stage_ids_to_keep`.""" + stage_ids_to_keep = set(stage_ids_to_keep) + kept_img_ids = set() + kept_stage_ids = set() + + # Filter queries -- keep those queries with stage_id in `stage_ids_to_keep` + filtered_queries = [] + for query in queries: + input_box = query.get("input_box", None) + input_points = query.get("input_points", None) + has_geo_input = input_box is not None or input_points is not None + if has_geo_input and not self.tile_img_keep_find_queries_with_geo_inputs: + continue + stage_id = query["query_processing_order"] + if stage_id in stage_ids_to_keep: + kept_img_ids.add(query["image_id"]) + kept_stage_ids.add(stage_id) + filtered_queries.append(query) + # Check that all frames in `stage_ids_to_keep` are present after filtering + all_frame_present = kept_stage_ids == stage_ids_to_keep + assert all_frame_present, f"{kept_stage_ids=} vs {stage_ids_to_keep=}" + if remap_stage_id: + # Remap those kept stage ids to be contiguous and starting from 0 + old_stage_ids = sorted(kept_stage_ids, reverse=reverse_time_axis) + stage_id_old2new = {old: new for new, old in enumerate(old_stage_ids)} + for query in filtered_queries: + ptr_x_is_empty = query["ptr_x_query_id"] in [None, -1] + ptr_y_is_empty = query["ptr_y_query_id"] in [None, -1] + assert ( + ptr_x_is_empty and ptr_y_is_empty + ), "Remapping stage ids is not supported for queries with non-empty ptr_x or ptr_y pointers" + query["query_processing_order"] 
= stage_id_old2new[ + query["query_processing_order"] + ] + + # Filter annotations -- keep those annotations with image_id in `kept_img_ids` + filtered_annotations = [ + ann for ann in annotations if ann["image_id"] in kept_img_ids + ] + + return filtered_queries, filtered_annotations, kept_img_ids + + def _tile_single_image_data(self, datapoint: Datapoint, num_stages_sample: int): + """ + Tile a single image and its queries to simulate video frames. The output is a + datapoint with *identical video frames* (i.e. the same static image) and needs + further transforms (e.g. affine) to get video frames with different content. + """ + # tile `images: List[Image]` + assert len(datapoint.images) == 1, "Expected only one single image" + tiled_images = [ + copy.deepcopy(datapoint.images[0]) for _ in range(num_stages_sample) + ] + for stage_id, img in enumerate(tiled_images): + for obj in img.objects: + obj.frame_index = stage_id + + # tile `raw_images: Optional[List[PILImage.Image]] = None` + tiled_raw_images = None + if datapoint.raw_images is not None: + assert len(datapoint.raw_images) == 1, "Expected only one single image" + tiled_raw_images = [ + datapoint.raw_images[0].copy() for _ in range(num_stages_sample) + ] + + # tile `find_queries: List[FindQueryLoaded]` + tiled_find_queries_per_stage = [[] for _ in range(num_stages_sample)] + for query in datapoint.find_queries: + assert query.image_id == 0 + assert query.query_processing_order == 0 + # check and make sure that a query doesn't contain pointers or references + # to other queries (that cannot be tiled) + assert query.ptr_x is None and query.ptr_y is None + assert query.ptr_mem is None + # assert query.wkdata_qid is None + # assert query.other_positive_qids is None + # assert query.negative_qids is None + has_geo_input = ( + query.input_bbox is not None or query.input_points is not None + ) + if has_geo_input and not self.tile_img_keep_find_queries_with_geo_inputs: + continue + for stage_id in range(num_stages_sample): + # copy the query and update the image_id + new_query = copy.deepcopy(query) + new_query.image_id = stage_id + new_query.query_processing_order = stage_id + if new_query.inference_metadata is not None: + new_query.inference_metadata.frame_index = stage_id + tiled_find_queries_per_stage[stage_id].append(new_query) + + tiled_find_queries = sum(tiled_find_queries_per_stage, []) + + # tile `get_queries: List[GetQuery]` -- we skip them for now (since they involve + # a pointer to a find query that is complicated to tile, and there is not an + # imminent use case for them in the video grounding task in the near future) + if self.tile_img_keep_get_queries: + raise NotImplementedError("Tiling get queries is not implemented yet") + else: + tiled_get_queries = [] + + return Datapoint( + images=tiled_images, + raw_images=tiled_raw_images, + find_queries=tiled_find_queries, + get_queries=tiled_get_queries, + ) + + def _subsample_queries(self, datapoint: Datapoint, max_query_num: int): + """Subsample to keep at most `max_query_num` queries per frame in a datapoint.""" + # aggregate the find queries per stage + num_frames = max(q.query_processing_order for q in datapoint.find_queries) + 1 + find_queries_per_stage = [[] for _ in range(num_frames)] + for query in datapoint.find_queries: + find_queries_per_stage[query.query_processing_order].append(query) + + # verify that all the stages have the same number of queries + num_queries_per_stage = len(find_queries_per_stage[0]) + for queries in find_queries_per_stage: + assert len(queries) 
== num_queries_per_stage + if max_query_num <= 0 or num_queries_per_stage <= max_query_num: + return datapoint + + # subsample the queries to keep only `max_query_num` queries + sampled_inds = self.rng.sample(range(num_queries_per_stage), max_query_num) + sampled_find_queries_per_stage = [ + [queries[idx] for idx in sampled_inds] for queries in find_queries_per_stage + ] + sampled_find_queries = sum(sampled_find_queries_per_stage, []) + return Datapoint( + images=datapoint.images, + raw_images=datapoint.raw_images, + find_queries=sampled_find_queries, + get_queries=datapoint.get_queries, + ) diff --git a/detect_tools/sam3/sam3/train/data/torch_dataset.py b/detect_tools/sam3/sam3/train/data/torch_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..05556171c0decff7c110803afeb1226b1a157e48 --- /dev/null +++ b/detect_tools/sam3/sam3/train/data/torch_dataset.py @@ -0,0 +1,52 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +from typing import Callable, Iterable, Optional + +from torch.utils.data import DataLoader, Dataset, DistributedSampler, IterableDataset + + +class TorchDataset: + def __init__( + self, + dataset: Dataset, + batch_size: int, + num_workers: int, + shuffle: bool, + pin_memory: bool, + drop_last: bool, + collate_fn: Optional[Callable] = None, + worker_init_fn: Optional[Callable] = None, + enable_distributed_sampler=True, + ) -> None: + self.dataset = dataset + self.batch_size = batch_size + self.num_workers = num_workers + self.shuffle = shuffle + self.pin_memory = pin_memory + self.drop_last = drop_last + self.collate_fn = collate_fn + self.worker_init_fn = worker_init_fn + assert not isinstance(self.dataset, IterableDataset), "Not supported yet" + if enable_distributed_sampler: + self.sampler = DistributedSampler(self.dataset, shuffle=self.shuffle) + else: + self.sampler = None + + def get_loader(self, epoch) -> Iterable: + if self.sampler: + self.sampler.set_epoch(epoch) + if hasattr(self.dataset, "epoch"): + self.dataset.epoch = epoch + if hasattr(self.dataset, "set_epoch"): + self.dataset.set_epoch(epoch) + + return DataLoader( + self.dataset, + batch_size=self.batch_size, + num_workers=self.num_workers, + pin_memory=self.pin_memory, + drop_last=self.drop_last, + sampler=self.sampler, + collate_fn=self.collate_fn, + worker_init_fn=self.worker_init_fn, + ) diff --git a/detect_tools/sam3/sam3/train/loss/__init__.py b/detect_tools/sam3/sam3/train/loss/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..46d37d2aaef52ec7de01999516a2b8c3e1fa4986 --- /dev/null +++ b/detect_tools/sam3/sam3/train/loss/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved diff --git a/detect_tools/sam3/sam3/train/loss/loss_fns.py b/detect_tools/sam3/sam3/train/loss/loss_fns.py new file mode 100644 index 0000000000000000000000000000000000000000..3b61d7f59f9b9bd2c155d1e118954d6215fefefe --- /dev/null +++ b/detect_tools/sam3/sam3/train/loss/loss_fns.py @@ -0,0 +1,1319 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
All Rights Reserved + +import logging +import warnings + +import torch +import torch.distributed +import torch.nn.functional as F +import torchmetrics + +from sam3.model import box_ops + +from sam3.model.data_misc import interpolate + +from sam3.train.loss.sigmoid_focal_loss import ( + triton_sigmoid_focal_loss, + triton_sigmoid_focal_loss_reduce, +) +from torch import nn + +from .mask_sampling import ( + calculate_uncertainty, + get_uncertain_point_coords_with_randomness, + point_sample, +) + + +CORE_LOSS_KEY = "core_loss" + + +def instance_masks_to_semantic_masks( + instance_masks: torch.Tensor, num_instances: torch.Tensor +) -> torch.Tensor: + """This function converts instance masks to semantic masks. + It accepts a collapsed batch of instances masks (ie all instance masks are concatenated in a single tensor) and + the number of instances in each image of the batch. + It returns a mask with the same spatial dimensions as the input instance masks, where for each batch element the + semantic mask is the union of all the instance masks in the batch element. + + If for a given batch element there are no instances (ie num_instances[i]==0), the corresponding semantic mask will be a tensor of zeros. + + Args: + instance_masks (torch.Tensor): A tensor of shape (N, H, W) where N is the number of instances in the batch. + num_instances (torch.Tensor): A tensor of shape (B,) where B is the batch size. It contains the number of instances + in each image of the batch. + + Returns: + torch.Tensor: A tensor of shape (B, H, W) where B is the batch size and H, W are the spatial dimensions of the + input instance masks. + """ + if num_instances.sum() == 0: + # all negative batch, create a tensor of zeros (B, 1, 1) + return num_instances.unsqueeze(-1).unsqueeze(-1) + + masks_per_query = torch.split(instance_masks, num_instances.tolist()) + + return torch.stack([torch.any(masks, dim=0) for masks in masks_per_query], dim=0) + + +@torch.no_grad() +def accuracy(output, target, topk=(1,)): + """Computes the precision@k for the specified values of k""" + if target.numel() == 0: + return [torch.zeros([], device=output.device)] + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].view(-1).float().sum(0) + res.append(correct_k.mul_(100.0 / batch_size)) + return res + + +def dice_loss(inputs, targets, num_boxes, loss_on_multimask=False, reduce=True): + """ + Compute the DICE loss, similar to generalized IOU for masks + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs + (0 for the negative class and 1 for the positive class). 
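+        num_boxes: Normalization factor for the reduced loss (typically the total number
+            of target boxes in the batch).
+        loss_on_multimask: If True, inputs/targets are [N, M, H, W] multimask predictions
+            and a per-mask-channel loss (divided by num_boxes) is returned.
+        reduce: If False, return the per-example loss without summation.
+    Returns:
+        Dice loss computed on p = sigmoid(inputs) as
+        1 - (2 * sum(p * t) + 1) / (sum(p) + sum(t) + 1).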
+ """ + try: + loss = _dice_loss(inputs, targets, num_boxes, loss_on_multimask, reduce) + except torch.OutOfMemoryError: + logging.error("GPU OOM, computing dice loss on CPU") + # try to recover from GPU OOM by moving tensors to CPU and computing loss there + orig_device = inputs.device + inputs = inputs.cpu() + targets = targets.cpu() + if isinstance(num_boxes, torch.Tensor): + num_boxes = num_boxes.cpu() + loss = _dice_loss(inputs, targets, num_boxes, loss_on_multimask, reduce) + loss = loss.to(orig_device) + + return loss + + +def _dice_loss(inputs, targets, num_boxes, loss_on_multimask=False, reduce=True): + inputs = inputs.sigmoid() + if loss_on_multimask: + # inputs and targets are [N, M, H, W] where M corresponds to multiple predicted masks + assert inputs.dim() == 4 and targets.dim() == 4 + # flatten spatial dimension while keeping multimask channel dimension + inputs = inputs.flatten(2) + targets = targets.flatten(2) + numerator = 2 * (inputs * targets).sum(-1) + else: + inputs = inputs.flatten(1) + numerator = 2 * (inputs * targets).sum(1) + denominator = inputs.sum(-1) + targets.sum(-1) + loss = 1 - (numerator + 1) / (denominator + 1) + if loss_on_multimask: + return loss / num_boxes + if not reduce: + return loss + return loss.sum() / num_boxes + + +def sigmoid_focal_loss( + inputs, + targets, + num_boxes, + alpha: float = 0.25, + gamma: float = 2, + loss_on_multimask=False, + reduce=True, + triton=True, +): + """ + Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs + (0 for the negative class and 1 for the positive class). + alpha: (optional) Weighting factor in range (0,1) to balance + positive vs negative examples. Default = -1 (no weighting). + gamma: Exponent of the modulating factor (1 - p_t) to + balance easy vs hard examples. 
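+        loss_on_multimask: If True, treat inputs/targets as [N, M, H, W] multimask
+            predictions and return a per-mask-channel loss averaged over the spatial
+            dims and divided by num_boxes.
+        reduce: If False, return the unreduced elementwise loss.
+        triton: If True, use the fused Triton kernel implementation (requires
+            0 <= alpha <= 1); otherwise fall back to the pure PyTorch formulation.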
+ Returns: + Loss tensor + """ + if not (0 <= alpha <= 1) and triton: + raise RuntimeError(f"Alpha should be in [0,1], got {alpha}") + if triton: + if reduce and not loss_on_multimask: + loss = triton_sigmoid_focal_loss_reduce(inputs, targets, alpha, gamma) + return loss / (num_boxes * inputs.shape[1]) + + loss = triton_sigmoid_focal_loss(inputs, targets, alpha, gamma) + else: + prob = inputs.sigmoid() + ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") + p_t = prob * targets + (1 - prob) * (1 - targets) + loss = ce_loss * ((1 - p_t) ** gamma) + + if alpha >= 0: + alpha_t = alpha * targets + (1 - alpha) * (1 - targets) + loss = alpha_t * loss + + if not reduce: + return loss + + if loss_on_multimask: + # loss is [N, M, H, W] where M corresponds to multiple predicted masks + assert loss.dim() == 4 + return loss.flatten(2).mean(-1) / num_boxes # average over spatial dims + return loss.mean(1).sum() / num_boxes + + +def iou_loss( + inputs, targets, pred_ious, num_boxes, loss_on_multimask=False, use_l1_loss=False +): + """MSE loss between predicted IoUs and actual IoUs between inputs and targets.""" + assert inputs.dim() == 4 and targets.dim() == 4 + pred_mask = inputs.flatten(2) > 0 + gt_mask = targets.flatten(2) > 0 + area_i = torch.sum(pred_mask & gt_mask, dim=-1).float() + area_u = torch.sum(pred_mask | gt_mask, dim=-1).float() + actual_ious = area_i / torch.clamp(area_u, min=1.0) + + if use_l1_loss: + loss = F.l1_loss(pred_ious, actual_ious, reduction="none") + else: + loss = F.mse_loss(pred_ious, actual_ious, reduction="none") + if loss_on_multimask: + return loss / num_boxes + return loss.sum() / num_boxes + + +@torch.jit.script +def _contrastive_align(logits, positive_map): + positive_logits = -logits.masked_fill(~positive_map, 0) + negative_logits = logits # .masked_fill(positive_map, -1000000) + + boxes_with_pos = positive_map.any(2) + pos_term = positive_logits.sum(2) + neg_term = negative_logits.logsumexp(2) + + nb_pos = positive_map.sum(2) + 1e-6 + + box_to_token_loss = ( + (pos_term / nb_pos + neg_term).masked_fill(~boxes_with_pos, 0).sum() + ) + + tokens_with_pos = positive_map.any(1) + pos_term = positive_logits.sum(1) + neg_term = negative_logits.logsumexp(1) + + nb_pos = positive_map.sum(1) + 1e-6 + + tokens_to_boxes_loss = ( + (pos_term / nb_pos + neg_term).masked_fill(~tokens_with_pos, 0).sum() + ) + return (box_to_token_loss + tokens_to_boxes_loss) / 2 + + +def _get_src_permutation_idx(indices): + # permute predictions following indices + batch_idx = torch.cat( + [torch.full_like(src, i) for i, (src, _) in enumerate(indices)] + ) + src_idx = torch.cat([src for (src, _) in indices]) + return batch_idx, src_idx + + +class LossWithWeights(nn.Module): + def __init__(self, weight_dict, compute_aux, supports_o2m_loss=True): + super().__init__() + # weights for each computed loss key (those losses not in weight_dict + # will not be aggregated in the final reduced core loss) + self.weight_dict = weight_dict if weight_dict is not None else {} + # whether this loss will be applied on auxiliary outputs + self.compute_aux = compute_aux + self.supports_o2m_loss = supports_o2m_loss + self.target_keys = [] + + def forward(self, *args, is_aux=False, **kwargs): + if is_aux and not self.compute_aux: + return {CORE_LOSS_KEY: 0.0} + losses = self.get_loss(*args, **kwargs) + losses[CORE_LOSS_KEY] = self.reduce_loss(losses) + return losses + + def get_loss(self, **kwargs): + raise NotImplementedError() + + def reduce_loss(self, losses): + reduced_loss = 0.0 + for 
loss_key, weight in self.weight_dict.items(): + if loss_key not in losses: + raise ValueError(f"{type(self)} doesn't compute {loss_key}") + if weight != 0: + reduced_loss += losses[loss_key] * weight + + return reduced_loss + + +class IABCEMdetr(LossWithWeights): + def __init__( + self, + pos_weight, + weight_dict=None, + compute_aux=True, + gamma=0, + weak_loss=True, + alpha=0.25, + pad_n_queries=None, + pad_scale_pos=1.0, + use_separate_loss_for_det_and_trk=False, + num_det_queries=None, + det_exhaustive_loss_scale_pos=1.0, + det_exhaustive_loss_scale_neg=1.0, + det_non_exhaustive_loss_scale_pos=1.0, + det_non_exhaustive_loss_scale_neg=1.0, + trk_loss_scale_pos=1.0, + trk_loss_scale_neg=1.0, + no_loss_for_fp_propagation=False, + apply_loss_to_det_queries_in_video_grounding=True, + use_presence=False, + use_presence_semgseg=False, # If True, use presence scores from the semgseg head. + presence_alpha=0.5, + presence_gamma=0.0, + pos_focal: bool = False, # for box scores, use focal loss for positives as well + ): + super().__init__(weight_dict, compute_aux) + self.pos_weight = pos_weight + self.gamma = gamma + self.weak_loss = weak_loss + self.alpha = alpha + self.target_keys.append("boxes_xyxy") + self.no_loss_for_fp_propagation = no_loss_for_fp_propagation + if self.weak_loss: + self.target_keys.append("is_exhaustive") + # NOTE: This is hacky solution to have the same CE loss scale across datasets where the model might predict different number of object queries for different tasks. + # If not None, we assume there are a total pad_n_queries object queries. + # For example, if the model predicts only 1 object query and pad_n_queries=100, we pad the predictions with 99 zero preds. + # Currently this only affects the BCE loss and not the F1 score. + self.pad_n_queries = pad_n_queries + self.pad_scale_pos = pad_scale_pos + if self.pad_scale_pos != 1.0: + assert self.pad_n_queries is not None + # whether to use presence scores + self.use_presence = use_presence + self.use_presence_semgseg = use_presence_semgseg + if self.use_presence_semgseg: + assert self.use_presence + self.presence_alpha = presence_alpha + self.presence_gamma = presence_gamma + self.pos_focal = pos_focal + + # Decoupled loss for detection and tracking queries + self.apply_loss_to_det_queries_in_video_grounding = ( + apply_loss_to_det_queries_in_video_grounding + ) + self.use_separate_loss_for_det_and_trk = use_separate_loss_for_det_and_trk + if num_det_queries is not None: + logging.warning("note: it's not needed to set num_det_queries anymore") + if self.use_separate_loss_for_det_and_trk: + assert not self.weak_loss, "Do not use weak_loss in this case -- set separate loss for detection and tracking queries instead" + self.det_exhaustive_loss_scale_pos = det_exhaustive_loss_scale_pos + self.det_exhaustive_loss_scale_neg = det_exhaustive_loss_scale_neg + self.det_non_exhaustive_loss_scale_pos = det_non_exhaustive_loss_scale_pos + self.det_non_exhaustive_loss_scale_neg = det_non_exhaustive_loss_scale_neg + self.trk_loss_scale_pos = trk_loss_scale_pos + self.trk_loss_scale_neg = trk_loss_scale_neg + else: + assert ( + det_exhaustive_loss_scale_pos == 1.0 + and det_exhaustive_loss_scale_neg == 1.0 + and det_non_exhaustive_loss_scale_pos == 1.0 + and det_non_exhaustive_loss_scale_neg == 1.0 + and trk_loss_scale_pos == 1.0 + and trk_loss_scale_neg == 1.0 + ), "If not using separate loss for detection and tracking queries, separate detection and tracking loss scales should all be 1.0" + + def get_loss(self, outputs, targets, 
indices, num_boxes): + assert len(outputs["pred_logits"].shape) > 2, "Incorrect predicted logits shape" + assert outputs["pred_logits"].shape[-1] == 1, "Incorrect predicted logits shape" + src_logits = outputs["pred_logits"].squeeze(-1) + prob = src_logits.sigmoid() + + with torch.no_grad(): + target_classes = torch.full( + src_logits.shape[:2], + 0, + dtype=torch.float, + device=src_logits.device, + ) + target_classes[(indices[0], indices[1])] = 1 + src_boxes_xyxy = outputs["pred_boxes_xyxy"][(indices[0], indices[1])] + target_boxes_giou = ( + targets["boxes_xyxy"][indices[2]] + if indices[2] is not None + else targets["boxes_xyxy"] + ) + + iou = box_ops.fast_diag_box_iou(src_boxes_xyxy, target_boxes_giou) + t = prob[(indices[0], indices[1])] ** self.alpha * iou ** (1 - self.alpha) + t = torch.clamp(t, 0.01).detach() + positive_target_classes = target_classes.clone() + positive_target_classes[(indices[0], indices[1])] = t + + # Soft loss on positives + if self.pos_focal: + loss_bce = sigmoid_focal_loss( + src_logits.contiguous(), + positive_target_classes, + num_boxes=1, + alpha=0.5, + gamma=self.gamma, + reduce=False, + ) + else: + loss_bce = F.binary_cross_entropy_with_logits( + src_logits, positive_target_classes, reduction="none" + ) + loss_bce = loss_bce * target_classes * self.pos_weight + + if ( + self.pad_n_queries is not None + and isinstance(self.pad_n_queries, int) + and loss_bce.size(1) < self.pad_n_queries + ): + loss_bce = loss_bce * self.pad_scale_pos + # Negatives + loss_bce = loss_bce + F.binary_cross_entropy_with_logits( + src_logits, target_classes, reduction="none" + ) * (1 - target_classes) * (prob**self.gamma) + + # Optionally, not applying IABCEMdetr loss to detection queries in video. + is_video_grounding = outputs.get("is_video_grounding_batch", False) + if is_video_grounding and not self.apply_loss_to_det_queries_in_video_grounding: + Q_det = outputs["Q_det"] + loss_bce[:, :Q_det] *= 0.0 + presence_loss = torch.tensor(0.0, device=src_logits.device) + presence_dec_acc = torch.tensor(0.0, device=src_logits.device) + if self.use_presence: + # no classifiction loss for individual tokens if no target gt + # cannot directly use targets["num_boxes"] to check if some + # GT box exists as there may be dummy boxes for "invisible objects" + # in video grounding data + + gt_padded_object_ids = targets["object_ids_padded"] # (B, H) + gt_padded_boxes = targets["boxes_padded"] # (B, H, 4) shape, CxCyWH + gt_padded_is_visible = ( + (gt_padded_object_ids >= 0) + & (gt_padded_boxes[..., 2] > 0) # width > 0 + & (gt_padded_boxes[..., 3] > 0) # height > 0 + ) + keep_loss = (gt_padded_is_visible.sum(dim=-1)[..., None] != 0).float() + + loss_bce = loss_bce * keep_loss + + if self.use_presence_semgseg: + # no loss here, has it's own separate loss computation + assert "presence_logit_dec" not in outputs + elif "presence_logit_dec" in outputs: + presence_logits = outputs["presence_logit_dec"].view_as(keep_loss) + bs = presence_logits.shape[0] + presence_loss = sigmoid_focal_loss( + presence_logits, + keep_loss, + # not num_boxes, but we'll use it to normalize by bs + num_boxes=bs, + alpha=self.presence_alpha, + gamma=self.presence_gamma, + ) + pred = (presence_logits.sigmoid() > 0.5).float() + presence_dec_acc = (pred == keep_loss).float().mean() + else: + # for o2m, nothing to do + pass + + if self.weak_loss: + assert not self.use_separate_loss_for_det_and_trk, "Do not use weak_loss in this case -- set separate loss for detection and tracking queries instead" + + # nullify the negative 
loss for the non-exhaustive classes + assert loss_bce.shape[0] == targets["is_exhaustive"].shape[0] + assert targets["is_exhaustive"].ndim == 1 + + loss_mask = (~targets["is_exhaustive"]).view(-1, 1).expand_as(loss_bce) + # restrict the mask to the negative supervision + loss_mask = loss_mask & (target_classes < 0.5) + loss_mask = ~loss_mask + # Mask the loss + loss_bce = loss_bce * loss_mask.float() + # Average + loss_bce = loss_bce.sum() / (loss_mask.sum() + 1e-6) + else: + # apply separate loss weights to detection and tracking queries + if self.use_separate_loss_for_det_and_trk: + Q_det = outputs["Q_det"] + assert loss_bce.size(1) >= Q_det + is_positive = target_classes > 0.5 + is_positive_det = is_positive[:, :Q_det] + is_positive_trk = is_positive[:, Q_det:] + assert loss_bce.size(0) == targets["is_exhaustive"].size(0) + is_exhaustive = targets["is_exhaustive"].unsqueeze(1).bool() + loss_scales = torch.zeros_like(loss_bce) + # detection query loss weights + loss_scales[:, :Q_det] = ( + (is_exhaustive & is_positive_det).float() + * self.det_exhaustive_loss_scale_pos + + (is_exhaustive & ~is_positive_det).float() + * self.det_exhaustive_loss_scale_neg + + (~is_exhaustive & is_positive_det).float() + * self.det_non_exhaustive_loss_scale_pos + + (~is_exhaustive & ~is_positive_det).float() + * self.det_non_exhaustive_loss_scale_neg + ) + # tracking query weights + loss_scales[:, Q_det:] = ( + is_positive_trk.float() * self.trk_loss_scale_pos + + (~is_positive_trk).float() * self.trk_loss_scale_neg + ) + # apply the loss weights + + # if the id is -2 means it is a fp propagation , we don't apply the loss to them + if self.no_loss_for_fp_propagation: + is_original_queries = outputs["pred_old_obj_ids"] != -2 + loss_scales *= (is_exhaustive | is_original_queries).float() + + loss_bce = loss_bce * loss_scales + + if self.pad_n_queries is None or loss_bce.size(1) >= self.pad_n_queries: + loss_bce = loss_bce.mean() + else: + assert isinstance(self.pad_n_queries, int) + assert ( + loss_bce.size(1) < self.pad_n_queries + ), f"The number of predictions is more than the expected total after padding. Got {loss_bce.size(1)} predictions." + loss_bce = loss_bce.sum() / (self.pad_n_queries * loss_bce.size(0)) + + bce_f1 = torchmetrics.functional.f1_score( + src_logits.sigmoid().flatten(), + target=target_classes.flatten().long(), + task="binary", + ) + + losses = { + "loss_ce": loss_bce, + "ce_f1": bce_f1, + "presence_loss": presence_loss, + "presence_dec_acc": presence_dec_acc, + } + return losses + + +class Boxes(LossWithWeights): + def __init__( + self, + weight_dict=None, + compute_aux=True, + apply_loss_to_det_queries_in_video_grounding=True, + ): + super().__init__(weight_dict, compute_aux) + self.apply_loss_to_det_queries_in_video_grounding = ( + apply_loss_to_det_queries_in_video_grounding + ) + self.target_keys.extend(["boxes", "boxes_xyxy"]) + + def get_loss(self, outputs, targets, indices, num_boxes): + """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss + targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] + The target boxes are expected in format (center_x, center_y, h, w), normalized by the image size. + """ + # Optionally, not applying Boxes loss to detection queries in video. 
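+        # Note on the `indices` triple used below (same convention as in the class-score
+        # loss above): indices[0] / indices[1] select the matched (batch, query) prediction
+        # slots, and indices[2], when not None, permutes the packed targets to align with
+        # those predictions.
+        # (_keep_only_trk_queries_in_match_inds is defined elsewhere in this module; its
+        #  exact behaviour is assumed, not shown here.)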
+ is_video_grounding = outputs.get("is_video_grounding_batch", False) + if is_video_grounding and not self.apply_loss_to_det_queries_in_video_grounding: + indices = _keep_only_trk_queries_in_match_inds( + indices, Q_det=outputs["Q_det"] + ) + + assert "pred_boxes" in outputs + # idx = self._get_src_permutation_idx(indices) + src_boxes = outputs["pred_boxes"][(indices[0], indices[1])] + src_boxes_xyxy = outputs["pred_boxes_xyxy"][(indices[0], indices[1])] + target_boxes = ( + targets["boxes"] if indices[2] is None else targets["boxes"][indices[2]] + ) + target_boxes_giou = ( + targets["boxes_xyxy"] + if indices[2] is None + else targets["boxes_xyxy"][indices[2]] + ) + + loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction="none") + + losses = {} + losses["loss_bbox"] = loss_bbox.sum() / num_boxes + + loss_giou = 1 - box_ops.fast_diag_generalized_box_iou( + src_boxes_xyxy, target_boxes_giou + ) + losses["loss_giou"] = loss_giou.sum() / num_boxes + return losses + + +class Masks(LossWithWeights): + def __init__( + self, + weight_dict=None, + compute_aux=False, + focal_alpha=0.25, + focal_gamma=2, + num_sample_points=None, + oversample_ratio=None, + importance_sample_ratio=None, + apply_loss_to_det_queries_in_video_grounding=True, + ): + super().__init__(weight_dict, compute_aux) + if compute_aux: + warnings.warn("Masks loss usually shouldn't be applied to aux outputs") + self.focal_alpha = focal_alpha + self.focal_gamma = focal_gamma + self.num_sample_points = num_sample_points + self.oversample_ratio = oversample_ratio + self.importance_sample_ratio = importance_sample_ratio + self.apply_loss_to_det_queries_in_video_grounding = ( + apply_loss_to_det_queries_in_video_grounding + ) + self.target_keys.extend(["masks", "is_valid_mask"]) + + def _sampled_loss(self, src_masks, target_masks, num_boxes): + assert len(src_masks.shape) == 3 and len(target_masks.shape) == 3 + src_masks = src_masks[:, None] + target_masks = target_masks[:, None] + with torch.no_grad(): + # Sample point_coords + point_coords = get_uncertain_point_coords_with_randomness( + src_masks, + calculate_uncertainty, + self.num_sample_points, + self.oversample_ratio, + self.importance_sample_ratio, + ) + + # get GT labels + sampled_target_masks = point_sample( + target_masks, + point_coords, + align_corners=False, + ).squeeze(1) + + sampled_src_masks = point_sample( + src_masks, + point_coords, + align_corners=False, + ).squeeze(1) + + losses = { + "loss_mask": sigmoid_focal_loss( + sampled_src_masks, + sampled_target_masks, + num_boxes, + alpha=self.focal_alpha, + gamma=self.focal_gamma, + ), + "loss_dice": dice_loss(sampled_src_masks, sampled_target_masks, num_boxes), + } + # Not needed for backward + del src_masks + del target_masks + + return losses + + def get_loss(self, outputs, targets, indices, num_boxes): + """Compute the losses related to the masks: the focal loss and the dice loss. + targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] + """ + assert "pred_masks" in outputs + assert "is_valid_mask" in targets + # Optionally, not applying Masks loss to detection queries in video. 
+ is_video_grounding = outputs.get("is_video_grounding_batch", False) + if is_video_grounding and not self.apply_loss_to_det_queries_in_video_grounding: + indices = _keep_only_trk_queries_in_match_inds( + indices, Q_det=outputs["Q_det"] + ) + + src_masks = outputs["pred_masks"] + + # Dataset doesn't have segmentation masks + if targets["masks"] is None: + return { + "loss_mask": torch.tensor(0.0, device=src_masks.device), + "loss_dice": torch.tensor(0.0, device=src_masks.device), + } + + target_masks = ( + targets["masks"] if indices[2] is None else targets["masks"][indices[2]] + ) + target_masks = target_masks.to(src_masks) + keep = ( + targets["is_valid_mask"] + if indices[2] is None + else targets["is_valid_mask"][indices[2]] + ) + + src_masks = src_masks[(indices[0], indices[1])] + + # Remove invalid masks from loss + src_masks = src_masks[keep] + target_masks = target_masks[keep] + + if self.num_sample_points is not None: + # Compute loss on sampled points for the Mask + losses = self._sampled_loss(src_masks, target_masks, num_boxes) + + else: + # upsample predictions to the target size + if target_masks.shape[0] == 0 and src_masks.shape[0] == 0: + src_masks = src_masks.flatten(1) + target_masks = target_masks.reshape(src_masks.shape) + else: + if len(src_masks.shape) == 3: + src_masks = src_masks[:, None] + if src_masks.dtype == torch.bfloat16: + # Bilinear interpolation does not support bf16 + src_masks = src_masks.to(dtype=torch.float32) + src_masks = interpolate( + src_masks, + size=target_masks.shape[-2:], + mode="bilinear", + align_corners=False, + ) + src_masks = src_masks[:, 0].flatten(1) + target_masks = target_masks.flatten(1) + + losses = { + "loss_mask": sigmoid_focal_loss( + src_masks, + target_masks, + num_boxes, + alpha=self.focal_alpha, + gamma=self.focal_gamma, + ), + "loss_dice": dice_loss(src_masks, target_masks, num_boxes), + } + + return losses + + +# class MultiStepIteractiveMasks(LossWithWeights): +# def __init__( +# self, +# weight_dict=None, +# compute_aux=False, +# focal_alpha=0.25, +# focal_gamma=2, +# ): +# warnings.warn( +# "MultiStepIteractiveMasks is deprecated. Please use MultiStepMultiMasksAndIous", +# DeprecationWarning, +# ) +# super().__init__(weight_dict, compute_aux) +# self.focal_alpha = focal_alpha +# self.focal_gamma = focal_gamma +# self.target_keys.extend(["masks"]) + +# def get_loss(self, outputs, targets, indices, num_boxes): +# """Compute the losses related to the masks: the focal loss and the dice loss. +# targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] + +# Unlike `Masks`, here the "multistep_pred_masks" can have multiple channels, each +# corresponding to one iterative prediction step in SAM-style training. We treat each +# channel as a mask prediction and sum the loss across channels. +# """ +# src_masks = outputs["multistep_pred_masks"] +# target_masks = targets["masks"] +# assert src_masks.size(0) == target_masks.size(0) +# assert src_masks.dim() == 4 +# assert target_masks.dim() == 3 + +# # tile target_masks according to the number of +# # channels `src_masks`. 
+# num_steps = src_masks.size(1) +# target_masks = target_masks.unsqueeze(1).to(src_masks.dtype) +# if num_steps > 1: +# target_masks = target_masks.repeat(1, num_steps, 1, 1) + +# # resize `src_masks` to target mask resolution +# if src_masks.shape != target_masks.shape: +# src_masks = interpolate( +# src_masks, +# size=target_masks.shape[-2:], +# mode="bilinear", +# align_corners=False, +# ) +# assert src_masks.shape == target_masks.shape + +# # flatten the multiple steps in to the batch dimension +# src_masks = src_masks.flatten(0, 1).flatten(1) +# target_masks = target_masks.flatten(0, 1).flatten(1) +# losses = { +# "loss_mask": sigmoid_focal_loss( +# src_masks, +# target_masks, +# num_boxes, +# alpha=self.focal_alpha, +# gamma=self.focal_gamma, +# ), +# "loss_dice": dice_loss(src_masks, target_masks, num_boxes), +# } + +# return losses + + +# class MultiStepMultiMasksAndIous(LossWithWeights): +# def __init__( +# self, +# weight_dict=None, +# compute_aux=False, +# focal_alpha=0.25, +# focal_gamma=2, +# # if True, back-prop on all predicted ious +# # not just the one with lowest loss_combo +# supervise_all_iou=False, +# # Less slack vs MSE loss in [-1, 1] error range +# iou_use_l1_loss=False, +# # Settings for obj score prediction +# pred_obj_scores=False, +# focal_gamma_obj_score=0.0, +# focal_alpha_obj_score=-1, +# ): +# super().__init__(weight_dict, compute_aux) +# self.focal_alpha = focal_alpha +# self.focal_gamma = focal_gamma +# self.target_keys.extend(["masks"]) +# assert "loss_mask" in self.weight_dict +# assert "loss_dice" in self.weight_dict +# assert "loss_iou" in self.weight_dict +# if "loss_class" not in self.weight_dict: +# self.weight_dict["loss_class"] = 0.0 +# self.focal_alpha_obj_score = focal_alpha_obj_score +# self.focal_gamma_obj_score = focal_gamma_obj_score +# self.supervise_all_iou = supervise_all_iou +# self.iou_use_l1_loss = iou_use_l1_loss +# self.pred_obj_scores = pred_obj_scores + +# def get_loss(self, outputs, targets, indices, num_boxes): +# """ +# Compute the losses related to the masks: the focal loss and the dice loss. +# and also the MSE loss between predicted IoUs and actual IoUs. + +# Here "multistep_pred_multimasks_high_res" is a list of multimasks (tensors +# of shape [N, M, H, W], where M could be 1 or larger, corresponding to +# one or multiple predicted masks from a click. + +# We back-propagate focal, dice and iou losses only on the prediction channel +# with the lowest focal+dice loss between predicted mask and ground-truth. 
+# """ + +# target_masks = targets["masks"].unsqueeze(1).float() +# assert target_masks.dim() == 4 # [N, 1, H, W] +# src_masks_list = outputs["multistep_pred_multimasks_high_res"] +# ious_list = outputs["multistep_pred_ious"] +# object_score_logits_list = outputs["multistep_object_score_logits"] + +# assert len(src_masks_list) == len(ious_list) +# assert len(object_score_logits_list) == len(ious_list) + +# # Remove invalid masks from loss +# keep = targets["is_valid_mask"] +# target_masks = target_masks[keep] + +# # accumulate the loss over prediction steps +# losses = {"loss_mask": 0, "loss_dice": 0, "loss_iou": 0, "loss_class": 0} +# for src_masks, ious, object_score_logits in zip( +# src_masks_list, ious_list, object_score_logits_list +# ): +# object_score_logits = object_score_logits[keep] +# ious = ious[keep] +# src_masks = src_masks[keep] +# self._update_losses( +# losses, src_masks, target_masks, ious, num_boxes, object_score_logits +# ) +# return losses + +# def _update_losses( +# self, losses, src_masks, target_masks, ious, num_boxes, object_score_logits +# ): +# target_masks = target_masks.expand_as(src_masks) +# # get focal, dice and iou loss on all output masks in a prediction step +# loss_multimask = sigmoid_focal_loss( +# src_masks, +# target_masks, +# num_boxes, +# alpha=self.focal_alpha, +# gamma=self.focal_gamma, +# loss_on_multimask=True, +# triton=False, # only use triton if alpha > 0 +# ) +# loss_multidice = dice_loss( +# src_masks, target_masks, num_boxes, loss_on_multimask=True +# ) +# if not self.pred_obj_scores: +# loss_class = torch.tensor( +# 0.0, dtype=loss_multimask.dtype, device=loss_multimask.device +# ) +# target_obj = torch.ones( +# loss_multimask.shape[0], +# 1, +# dtype=loss_multimask.dtype, +# device=loss_multimask.device, +# ) +# else: +# target_obj = torch.any((target_masks[:, 0] > 0).flatten(1), dim=-1)[ +# ..., None +# ].float() +# loss_class = sigmoid_focal_loss( +# object_score_logits, +# target_obj, +# num_boxes, +# alpha=self.focal_alpha_obj_score, +# gamma=self.focal_gamma_obj_score, +# triton=False, +# ) + +# loss_multiiou = iou_loss( +# src_masks, +# target_masks, +# ious, +# num_boxes, +# loss_on_multimask=True, +# use_l1_loss=self.iou_use_l1_loss, +# ) +# assert loss_multimask.dim() == 2 +# assert loss_multidice.dim() == 2 +# assert loss_multiiou.dim() == 2 +# if loss_multimask.size(1) > 1: +# # take the mask indices with the smallest focal + dice loss for back propagation +# loss_combo = ( +# loss_multimask * self.weight_dict["loss_mask"] +# + loss_multidice * self.weight_dict["loss_dice"] +# ) +# best_loss_inds = torch.argmin(loss_combo, dim=-1) +# batch_inds = torch.arange(loss_combo.size(0), device=loss_combo.device) +# loss_mask = loss_multimask[batch_inds, best_loss_inds].unsqueeze(1) +# loss_dice = loss_multidice[batch_inds, best_loss_inds].unsqueeze(1) +# # calculate the iou prediction and slot losses only in the index +# # with the minimum loss for each mask (to be consistent w/ SAM) +# if self.supervise_all_iou: +# loss_iou = loss_multiiou.mean(dim=-1).unsqueeze(1) +# else: +# loss_iou = loss_multiiou[batch_inds, best_loss_inds].unsqueeze(1) +# else: +# loss_mask = loss_multimask +# loss_dice = loss_multidice +# loss_iou = loss_multiiou + +# # backprop focal, dice and iou loss only if obj present +# loss_mask = loss_mask * target_obj +# loss_dice = loss_dice * target_obj +# loss_iou = loss_iou * target_obj + +# # sum over batch dimension (note that the losses are already divided by num_boxes) +# losses["loss_mask"] += 
loss_mask.sum() +# losses["loss_dice"] += loss_dice.sum() +# losses["loss_iou"] += loss_iou.sum() +# losses["loss_class"] += loss_class + + +# class TextCriterion(LossWithWeights): +# def __init__( +# self, +# pad_token, +# max_seq_len=100, +# weight_dict=None, +# compute_aux=False, +# ): +# super().__init__(weight_dict, compute_aux) +# self.pad_token = pad_token +# self.max_seq_len = max_seq_len +# self.in_lengths = None + +# def get_loss(self, outputs, **kwargs): +# nb_tokens = outputs["captioning_tokenized_target"].input_ids.numel() +# bs, seq_len = outputs["captioning_tokenized_target"].input_ids.shape +# ce = F.cross_entropy( +# outputs["captioning_pred_text"].flatten(0, -2), +# outputs["captioning_tokenized_target"].input_ids.flatten(), +# ignore_index=self.pad_token, +# reduction="sum", +# ) + +# not_pad = ( +# outputs["captioning_tokenized_target"] +# .input_ids.reshape(-1) +# .ne(self.pad_token) +# ) + +# if nb_tokens > 0: +# nb_non_pad = not_pad.numel() +# ce = ce / nb_non_pad + +# preds = outputs["captioning_pred_text"].flatten(0, -2).argmax(-1)[not_pad] +# targets = outputs["captioning_tokenized_target"].input_ids.flatten()[not_pad] +# correct = preds == targets +# correct = correct.sum() / (correct.numel() + 1e-5) + +# correct_sequence_level = torch.all( +# ( +# outputs["captioning_pred_text"] +# .flatten(0, -2) +# .argmax(-1) +# .reshape(bs, seq_len) +# == outputs["captioning_tokenized_target"].input_ids +# ) +# | (~not_pad).view(bs, seq_len), +# dim=1, +# ) +# seq_level_acc = correct_sequence_level.float().mean() + +# return {"loss_text": ce, "text_acc": correct, "text_seq_acc": seq_level_acc} + + +def segment_miou(source, target): + """Compute the mean IoU between two sets of masks""" + assert source.shape == target.shape, "The two masks must have the same shape" + assert source.ndim == 3, "The masks must be 3D" + + valid_targets = (target.sum(dim=(1, 2)) > 0).sum() + if valid_targets == 0: + return torch.tensor(1.0, device=source.device) + intersection = (source.bool() & target.bool()).sum(dim=(1, 2)) + union = (source.bool() | target.bool()).sum(dim=(1, 2)) + iou = intersection / (union + 1e-8) + return iou.sum() / valid_targets + + +class SemanticSegCriterion(LossWithWeights): + def __init__( + self, + weight_dict, + focal: bool = False, + focal_alpha: float = 0.6, + focal_gamma: float = 1.6, + downsample: bool = True, + presence_head: bool = False, + # Option to turn off presence loss, if some other component + # is already doing it, e.g. 
decoder - in which case, + # we could still set presence_head to True so that + # losses are not propogated to masks when there is no GT mask + presence_loss: bool = True, + ): + super().__init__(weight_dict, False) + self.focal = focal + self.focal_alpha = focal_alpha + self.focal_gamma = focal_gamma + self.downsample = downsample + self.presence_head = presence_head + self.presence_loss = presence_loss + + def get_loss(self, out_dict, targets): + outputs = out_dict["semantic_seg"] + presence_logit = out_dict["presence_logit"] + if ( + "semantic_masks" in targets + and targets["semantic_masks"] is not None + and targets["semantic_masks"].size(0) > 0 + ): + semantic_targets = targets["semantic_masks"] + with torch.no_grad(): + if self.downsample: + # downsample targets to the size of predictions + size = outputs.shape[-2:] + semantic_targets = ( + F.interpolate( + semantic_targets.float().unsqueeze(1), + size=size, + mode="bilinear", + align_corners=False, + ) + .squeeze(1) + .bool() + ) + else: + with torch.no_grad(): + if self.downsample: + # downsample targets to the size of predictions + size = outputs.shape[-2:] + segments = ( + F.interpolate( + targets["masks"].float().unsqueeze(1), + size=size, + mode="bilinear", + align_corners=False, + ) + .squeeze(1) + .bool() + ) + else: + segments = targets["masks"].bool() + + # the annotations are for instance segmentation, so we merge them to get semantic segmentation + semantic_targets = instance_masks_to_semantic_masks( + segments, targets["num_boxes"] + ) + + if not self.downsample: + # upsample predictions to the target size + size = semantic_targets.shape[-2:] + outputs = F.interpolate( + outputs.float(), + size=size, + mode="bilinear", + align_corners=False, + ) + + if self.focal: + loss = sigmoid_focal_loss( + outputs.squeeze(1).flatten(-2), + semantic_targets.float().flatten(-2), + num_boxes=len(semantic_targets), + alpha=self.focal_alpha, + gamma=self.focal_gamma, + reduce=not self.presence_head, + ) + if self.presence_head: + loss = loss.mean(1) + else: + loss = F.binary_cross_entropy_with_logits( + outputs.squeeze(1), + semantic_targets.float(), + reduction="none" if self.presence_head else "mean", + ) + if self.presence_head: + loss = loss.flatten(1).mean(1) + + loss_dice = dice_loss( + outputs.squeeze(1).flatten(1), + semantic_targets.flatten(1), + len(semantic_targets), + reduce=not self.presence_head, + ) + + miou = segment_miou(outputs.sigmoid().squeeze(1) > 0.5, semantic_targets) + + loss_dict = {} + + if self.presence_head: + presence_target = semantic_targets.flatten(1).any(-1) + if self.presence_loss: + loss_presence = F.binary_cross_entropy_with_logits( + presence_logit.flatten(), + presence_target.float(), + ) + presence_acc = ( + ((presence_logit.flatten().sigmoid() > 0.5) == presence_target) + .float() + .mean() + ) + else: + # Dummy values + loss_presence = torch.tensor(0.0, device=loss.device) + # Whichever component is computing the presence loss, + # should also track presence_acc + presence_acc = torch.tensor(0.0, device=loss.device) + + loss_dict["loss_semantic_presence"] = loss_presence + loss_dict["presence_acc"] = presence_acc + + # reduce the other losses, skipping the negative ones + bs = loss.shape[0] + assert presence_target.numel() == bs + + mask = presence_target + nb_valid = presence_target.sum().item() + + loss = (loss * mask.float()).sum() / (nb_valid + 1e-6) + loss_dice = (loss_dice * mask.float()).sum() / (nb_valid + 1e-6) + + loss_dict.update( + { + "loss_semantic_seg": loss, + 
"loss_semantic_dice": loss_dice, + "miou_semantic_seg": miou, + } + ) + + return loss_dict + + +class Det2TrkAssoc(LossWithWeights): + def __init__( + self, + weight_dict, + use_fp_loss=False, + fp_loss_on_exhaustive_only=True, + treat_fp_as_new_obj=False, + ): + super().__init__(weight_dict, compute_aux=False) + self.use_fp_loss = use_fp_loss + self.fp_loss_on_exhaustive_only = fp_loss_on_exhaustive_only + self.treat_fp_as_new_obj = treat_fp_as_new_obj + if self.use_fp_loss: + self.target_keys.append("is_exhaustive") + + def get_loss(self, outputs, targets, indices, num_boxes): + det2trk_assoc_logits = outputs["det2trk_assoc_logits"] + device = det2trk_assoc_logits.device + B, Q_det, Q_trk_plus_2 = det2trk_assoc_logits.shape + assert Q_trk_plus_2 >= 2 + Q_trk = Q_trk_plus_2 - 2 + + # We only apply association losses to those detection queries that either match + # a GT instance or have score > 0 (i.e. those TP, FN and FP detection queries) + matched_object_ids = outputs["matched_object_ids"] + assert matched_object_ids.shape == (B, Q_det + Q_trk) + matched_obj_ids_det = matched_object_ids[:, :Q_det] + matched_obj_ids_trk = matched_object_ids[:, Q_det:] + det_is_matched_to_gt = matched_obj_ids_det >= 0 + trk_is_matched_to_gt = matched_obj_ids_trk >= 0 + + # note: -1 label is ignored in the (softmax) cross_entropy loss below + det2trk_assoc_labels = -torch.ones(B, Q_det, dtype=torch.long, device=device) + # a) If a detection query is matched to a same object ID as a tracking query, + # we assign it the index of the tracking query as a label + det_is_same_obj_id_as_trk = ( + det_is_matched_to_gt[:, :, None] + & trk_is_matched_to_gt[:, None, :] + & (matched_obj_ids_det[:, :, None] == matched_obj_ids_trk[:, None, :]) + ) + batch_idx, det_idx, trk_idx = det_is_same_obj_id_as_trk.nonzero(as_tuple=True) + det2trk_assoc_labels[batch_idx, det_idx] = trk_idx + + # b) If a detection query is matched to GT but not to any tracking query, + # we assign it a "new_object" label + det_is_new_obj = det_is_matched_to_gt & ~det_is_same_obj_id_as_trk.any(dim=-1) + det2trk_assoc_labels[det_is_new_obj] = Q_trk + + # c) If a detection query is not matched to GT but have score > 0, + # we assign it a "false_positive" label + if self.use_fp_loss: + det_is_above_thresh = outputs["pred_logits"][:, :Q_det].squeeze(2) > 0 + det_is_fp = ~det_is_matched_to_gt & det_is_above_thresh + if self.treat_fp_as_new_obj: + det2trk_assoc_labels[det_is_fp] = Q_trk + else: + if self.fp_loss_on_exhaustive_only: + # only count FP detections on batches that are exhaustively annotated + det_is_fp &= targets["is_exhaustive"].unsqueeze(1).bool() + det2trk_assoc_labels[det_is_fp] = Q_trk + 1 + + # softmax cross-entropy loss for detection-to-tracking association + loss_det2trk_assoc = F.cross_entropy( + input=det2trk_assoc_logits.flatten(0, 1), # (B * Q_det, Q_trk + 2) + target=det2trk_assoc_labels.flatten(0, 1), # (B * Q_det) + ignore_index=-1, + reduction="none", + ).view(B, Q_det) + # skip det2trk assocation loss on frames w/o any (non-padding) tracking queries + frame_has_valid_trk = trk_is_matched_to_gt.any(dim=-1, keepdims=True) # (B, 1) + loss_det2trk_assoc = loss_det2trk_assoc * frame_has_valid_trk.float() + + loss_det2trk_assoc = loss_det2trk_assoc.sum() / (B * num_boxes) + return {"loss_det2trk_assoc": loss_det2trk_assoc} + + +class TrackingByDetectionAssoc(LossWithWeights): + def __init__(self, weight_dict): + super().__init__(weight_dict, compute_aux=False, supports_o2m_loss=False) + assert "loss_det2trk_assoc" in 
self.weight_dict + assert "loss_trk2det_assoc" in self.weight_dict + + def get_loss(self, outputs, targets, indices, num_boxes): + # Part A: gather object id matching between detection and tracking + det2trk_assoc_logits = outputs["det2trk_assoc_logits"] # (B, Q_det+1, Q_trk+1) + B, Q_det_plus_1, Q_trk_plus_1 = det2trk_assoc_logits.shape + assert Q_det_plus_1 >= 1 and Q_trk_plus_1 >= 1 + Q_det = Q_det_plus_1 - 1 + Q_trk = Q_trk_plus_1 - 1 + device = det2trk_assoc_logits.device + + matched_obj_ids_det = outputs["matched_object_ids"] + assert matched_obj_ids_det.shape == (B, Q_det) + det_is_matched_to_gt = matched_obj_ids_det >= 0 + matched_obj_ids_trk = outputs["prev_trk_object_ids"] + assert matched_obj_ids_trk.shape == (B, Q_trk) + trk_is_matched_to_gt = matched_obj_ids_trk >= 0 + frame_has_valid_trk = trk_is_matched_to_gt.any(dim=-1, keepdims=True) # (B, 1) + + # check whether a detection object is the same as a tracking object + det_is_same_obj_id_as_trk = ( + det_is_matched_to_gt[:, :, None] + & trk_is_matched_to_gt[:, None, :] + & (matched_obj_ids_det[:, :, None] == matched_obj_ids_trk[:, None, :]) + ) # (B, Q_det, Q_trk) + # there should be at most one match for each detection and each previous tracked object + torch._assert_async(torch.all(det_is_same_obj_id_as_trk.sum(dim=2) <= 1)) + torch._assert_async(torch.all(det_is_same_obj_id_as_trk.sum(dim=1) <= 1)) + batch_idx, det_idx, trk_idx = det_is_same_obj_id_as_trk.nonzero(as_tuple=True) + + # Part B: Detection-to-tracking association loss + # assign detection-to-tracking labels (note: -1 label is ignored in the loss below) + det2trk_assoc_labels = -torch.ones(B, Q_det, dtype=torch.long, device=device) + det2trk_assoc_labels[batch_idx, det_idx] = trk_idx + # if a detection is matched to GT but not to any tracking, assign it a "new-object" label + det_is_new_obj = det_is_matched_to_gt & ~det_is_same_obj_id_as_trk.any(dim=2) + det2trk_assoc_labels[det_is_new_obj] = Q_trk # "Q_trk" label is "new-object" + + # softmax cross-entropy loss for detection-to-tracking association + loss_det2trk_assoc = F.cross_entropy( + input=det2trk_assoc_logits[:, :-1].flatten(0, 1), # (B*Q_det, Q_trk+1) + target=det2trk_assoc_labels.flatten(0, 1), # (B*Q_det) + ignore_index=-1, + reduction="none", + ).view(B, Q_det) + # skip det2trk assocation loss on frames w/o any (non-padding) tracking queries + loss_det2trk_assoc = loss_det2trk_assoc * frame_has_valid_trk.float() + loss_det2trk_assoc = loss_det2trk_assoc.sum() / (B * num_boxes) + loss_dict = {"loss_det2trk_assoc": loss_det2trk_assoc} + + # Part C: tracking-to-detection association loss + trk2det_assoc_logits = det2trk_assoc_logits.transpose(1, 2) + assert trk2det_assoc_logits.shape == (B, Q_trk + 1, Q_det + 1) + # assign tracking-to-detection labels (note: -1 label is ignored in the loss below) + trk2det_assoc_labels = -torch.ones(B, Q_trk, dtype=torch.long, device=device) + trk2det_assoc_labels[batch_idx, trk_idx] = det_idx + # if a tracking is matched to GT but not to any detection, assign it a "occluded" label + trk_is_occluded = trk_is_matched_to_gt & ~det_is_same_obj_id_as_trk.any(dim=1) + trk2det_assoc_labels[trk_is_occluded] = Q_det # "Q_det" label is "occluded" + + # softmax cross-entropy loss for tracking-to-detection association + loss_trk2det_assoc = F.cross_entropy( + input=trk2det_assoc_logits[:, :-1].flatten(0, 1), # (B*Q_trk, Q_det+1) + target=trk2det_assoc_labels.flatten(0, 1), # (B*Q_trk) + ignore_index=-1, + reduction="none", + ).view(B, Q_trk) + # skip trk2det association loss on 
frames w/o any (non-padding) tracking queries + loss_trk2det_assoc = loss_trk2det_assoc * frame_has_valid_trk.float() + loss_trk2det_assoc = loss_trk2det_assoc.sum() / (B * num_boxes) + loss_dict["loss_trk2det_assoc"] = loss_trk2det_assoc + + return loss_dict + + +def _keep_only_trk_queries_in_match_inds(inds, Q_det): + """Keep only the tracking query indices in the indices tuple""" + batch_idx, src_idx, tgt_idx = inds + if batch_idx.numel() == 0: + return (batch_idx, src_idx, tgt_idx) # empty indices, nothing to filter + + # keep only the tracking query indices + is_trk_query = src_idx >= Q_det + batch_idx_trk = batch_idx[is_trk_query] + src_idx_trk = src_idx[is_trk_query] + tgt_idx_trk = tgt_idx[is_trk_query] if tgt_idx is not None else None + return (batch_idx_trk, src_idx_trk, tgt_idx_trk) diff --git a/detect_tools/sam3/sam3/train/loss/mask_sampling.py b/detect_tools/sam3/sam3/train/loss/mask_sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..aeba3fe7ff3ede863a3b3135b036220074010243 --- /dev/null +++ b/detect_tools/sam3/sam3/train/loss/mask_sampling.py @@ -0,0 +1,113 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +from typing import Callable + +import torch +from torch.nn import functional as F + + +# Adapted from https://github.com/facebookresearch/detectron2/blob/main/projects/PointRend/point_rend/point_features.py +def point_sample(input, point_coords, **kwargs): + """ + A wrapper around :function:`torch.nn.functional.grid_sample` to support 3D point_coords tensors. + Unlike :function:`torch.nn.functional.grid_sample` it assumes `point_coords` to lie inside + [0, 1] x [0, 1] square. + + Args: + input (Tensor): A tensor of shape (N, C, H, W) that contains features map on a H x W grid. + point_coords (Tensor): A tensor of shape (N, P, 2) or (N, Hgrid, Wgrid, 2) that contains + [0, 1] x [0, 1] normalized point coordinates. + + Returns: + output (Tensor): A tensor of shape (N, C, P) or (N, C, Hgrid, Wgrid) that contains + features for points in `point_coords`. The features are obtained via bilinear + interplation from `input` the same way as :function:`torch.nn.functional.grid_sample`. + """ + add_dim = False + if point_coords.dim() == 3: + add_dim = True + point_coords = point_coords.unsqueeze(2) + normalized_point_coords = 2.0 * point_coords - 1.0 # Normalize to [-1,1] + output = F.grid_sample(input, normalized_point_coords, **kwargs) + if add_dim: + output = output.squeeze(3) + return output + + +# Adapted from https://github.com/facebookresearch/detectron2/blob/main/projects/PointRend/point_rend/point_features.py +def get_uncertain_point_coords_with_randomness( + logits: torch.Tensor, + uncertainty_func: Callable, + num_points: int, + oversample_ratio: int, + importance_sample_ratio: float, +) -> torch.Tensor: + """ + Sample points in [0, 1] x [0, 1] coordinate space based on their uncertainty. The unceratinties + are calculated for each point using 'uncertainty_func' function that takes point's logit + prediction as input. + See PointRend paper for details. + + Args: + logits (Tensor): A tensor of shape (N, C, Hmask, Wmask) or (N, 1, Hmask, Wmask) for + class-specific or class-agnostic prediction. + uncertainty_func: A function that takes a Tensor of shape (N, C, P) or (N, 1, P) that + contains logit predictions for P points and returns their uncertainties as a Tensor of + shape (N, 1, P). + num_points (int): The number of points P to sample. + oversample_ratio (int): Oversampling parameter. 
+ importance_sample_ratio (float): Ratio of points that are sampled via importance sampling. + + Returns: + point_coords (Tensor): A tensor of shape (N, P, 2) that contains the coordinates of P + sampled points. + """ + assert oversample_ratio >= 1 + assert importance_sample_ratio <= 1 and importance_sample_ratio >= 0 + num_boxes = logits.shape[0] + num_sampled = int(num_points * oversample_ratio) + point_coords = torch.rand(num_boxes, num_sampled, 2, device=logits.device) + point_logits = point_sample(logits, point_coords, align_corners=False) + # It is crucial to calculate uncertainty based on the sampled prediction value for the points. + # Calculating uncertainties of the predictions first and sampling them for points leads + # to incorrect results. + # To illustrate this: assume uncertainty_func(logits)=-abs(logits), a sampled point between + # two predictions with -1 and 1 logits has 0 logits, and therefore 0 uncertainty value. + # However, if we calculate uncertainties for the predictions first, + # both will have -1 uncertainty, and the sampled point will get -1 uncertainty. + point_uncertainties = uncertainty_func(point_logits) + num_uncertain_points = int(importance_sample_ratio * num_points) + num_random_points = num_points - num_uncertain_points + idx = torch.topk(point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1] + # Flatten the indices + shift = num_sampled * torch.arange( + num_boxes, dtype=torch.long, device=logits.device + ) + idx += shift[:, None] + point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view( + num_boxes, num_uncertain_points, 2 + ) + if num_random_points > 0: + point_coords = torch.cat( + [ + point_coords, + torch.rand(num_boxes, num_random_points, 2, device=logits.device), + ], + dim=1, + ) + return point_coords + + +# Adapted from https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/modeling/criterion.py +def calculate_uncertainty(logits: torch.Tensor) -> torch.Tensor: + """ + Estimates uncertainty as L1 distance between 0.0 and the logit prediction. + Args: + logits (Tensor): A tensor of shape (R, 1, ...) for class-agnostic + predicted masks + Returns: + scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with + the most uncertain locations having the highest uncertainty score. + """ + assert logits.shape[1] == 1 + return -(torch.abs(logits)) diff --git a/detect_tools/sam3/sam3/train/loss/sam3_loss.py b/detect_tools/sam3/sam3/train/loss/sam3_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..7ef59dc9e00206aa51efb44391e81ea5d3d665e9 --- /dev/null +++ b/detect_tools/sam3/sam3/train/loss/sam3_loss.py @@ -0,0 +1,203 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import torch + +from sam3.model.model_misc import SAM3Output + +from sam3.train.utils.distributed import get_world_size + +from .loss_fns import CORE_LOSS_KEY, Det2TrkAssoc, Masks + + +class DummyLoss(torch.nn.Module): + """A dummy loss that always returns 0 (as a placeholder for eval)""" + + def __init__( + self, + core_loss_key: str = CORE_LOSS_KEY, + device: str = "cuda", + **kwargs, + ): + super().__init__() + self.core_loss_key = core_loss_key + self.device = torch.device(device) + + def forward(self, *args, **kwargs): + return {self.core_loss_key: torch.tensor(0.0, device=self.device)} + + def accumulate(self, out_dict): + """ + Called by iterative losses.
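+ If the core loss key is not yet present in out_dict, a zero tensor is inserted so that callers can + always reduce over it.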
+ """ + if self.core_loss_key not in out_dict: + out_dict[self.core_loss_key] = torch.tensor(0.0, device=self.device) + return out_dict + + +class Sam3LossWrapper(torch.nn.Module): + def __init__( + self, + loss_fns_find, + normalization="global", + matcher=None, + o2m_matcher=None, + o2m_weight=1.0, + use_o2m_matcher_on_o2m_aux=True, + loss_fn_semantic_seg=None, + normalize_by_valid_object_num=False, + normalize_by_stage_num=False, + scale_by_find_batch_size=False, + ): + super().__init__() + self.loss_fns_find = loss_fns_find + assert normalization in ["global", "local", "none"] + self.normalization = normalization + self.normalize_by_valid_object_num = normalize_by_valid_object_num + self.normalize_by_stage_num = normalize_by_stage_num + self.matcher = matcher + self.o2m_matcher = o2m_matcher + self.o2m_weight = o2m_weight + # whether to use the o2m matcher on the o2m queries in auxiliary outputs + self.use_o2m_matcher_on_o2m_aux = use_o2m_matcher_on_o2m_aux + self.loss_fn_semantic_seg = loss_fn_semantic_seg + self.scale_by_find_batch_size = scale_by_find_batch_size + + def _get_num_boxes(self, targets): + # the average number of target boxes for loss normalization + if self.normalize_by_valid_object_num: + # valid boxes are those with non-zero height and width + # (while padded invisible boxes are ) + boxes_hw = targets["boxes"].view(-1, 4) # cx, cy, w, h + num_boxes = (boxes_hw[:, 2:] > 0).all(dim=-1).sum().float() + else: + num_boxes = targets["num_boxes"].sum().float() + if self.normalization == "global": + torch.distributed.all_reduce(num_boxes) + num_boxes = torch.clamp(num_boxes / get_world_size(), min=1) + elif self.normalization == "local": + num_boxes = torch.clamp(num_boxes, min=1) + elif self.normalization == "none": + num_boxes = 1 + return num_boxes + + def compute_loss(self, nested_out, targets): + num_boxes = self._get_num_boxes(targets) + o2m_out_is_valid = nested_out.get("o2m_out_is_valid", None) + o2m_target_is_valid_padded = nested_out.get("o2m_target_is_valid_padded", None) + + # Get a list of outputs, including auxiliary and first stage outputs + output_list = [(nested_out, "", False)] # (out, suffix, is_aux) + if "aux_outputs" in nested_out: + output_list.extend( + (aux_out, f"_aux_{i}", True) + for i, aux_out in enumerate(nested_out["aux_outputs"]) + ) + if "first_stage" in nested_out: + output_list.append((nested_out["first_stage"], "_fs", True)) + + # Compute all the requested losses + losses = {} + total_core_loss = 0.0 + for out, suffix, is_aux in output_list: + # o2o matcher indices need to be computed by the model (as the video model requires + # a specific way of matching free and locked indices beyond just calling the matcher) + indices = out["indices"] + has_o2m_out = "pred_logits_o2m" in out + if has_o2m_out: + o2m_out = { + k[: -len("_o2m")]: v for k, v in out.items() if k.endswith("_o2m") + } + # o2m targets are the same as the o2o targets (assuming repeat=1) + o2m_targets = targets + if self.use_o2m_matcher_on_o2m_aux or not is_aux: + o2m_indices = self.o2m_matcher( + o2m_out, + o2m_targets, + out_is_valid=o2m_out_is_valid, + target_is_valid_padded=o2m_target_is_valid_padded, + ) + else: + o2m_indices = self.matcher( + o2m_out, + o2m_targets, + out_is_valid=o2m_out_is_valid, + target_is_valid_padded=o2m_target_is_valid_padded, + ) + + for loss_fn in self.loss_fns_find: + l_dict = loss_fn( + outputs=out, + targets=targets, + indices=indices, + num_boxes=num_boxes, + is_aux=is_aux, + ) + total_core_loss += l_dict.pop(CORE_LOSS_KEY) + 
losses.update({f"{k}{suffix}": v for k, v in l_dict.items()}) + + compute_o2m_loss = has_o2m_out + # a special handling to allow turning off mask loss in o2m + # (to be compatible with the original implementation) + if isinstance(loss_fn, Masks): + compute_o2m_loss = compute_o2m_loss and "pred_masks" in o2m_out + if isinstance(loss_fn, Det2TrkAssoc): + compute_o2m_loss = False # Det2TrkAssoc does not support o2m + if compute_o2m_loss: + l_dict = loss_fn( + outputs=o2m_out, + targets=o2m_targets, + indices=o2m_indices, + num_boxes=num_boxes, + is_aux=is_aux, + ) + for k in l_dict: + l_dict[k] *= self.o2m_weight + total_core_loss += l_dict.pop(CORE_LOSS_KEY) + losses.update({f"{k}{suffix}_o2m": v for k, v in l_dict.items()}) + + losses[CORE_LOSS_KEY] = total_core_loss + return losses + + def forward(self, find_stages: SAM3Output, find_targets): + if find_stages.loss_stages is not None: + find_targets = [find_targets[i] for i in find_stages.loss_stages] + with SAM3Output.iteration_mode( + find_stages, iter_mode=SAM3Output.IterMode.ALL_STEPS_PER_STAGE + ) as find_stages: + assert len(find_stages) == len(find_targets) + total_losses = {} + for stage_outputs, stage_targets in zip(find_stages, find_targets): + stage_targets = [stage_targets] * len(stage_outputs) + # If there are multiple steps within a stage, compute the loss for all of them (e.g. interactivity) + for outputs, targets in zip(stage_outputs, stage_targets): + cur_losses = self.compute_loss(outputs, targets) + + if self.loss_fn_semantic_seg is not None: + cur_losses_semantic = self.loss_fn_semantic_seg( + outputs, targets + ) + cur_losses[CORE_LOSS_KEY] += cur_losses_semantic.pop( + CORE_LOSS_KEY + ) + # make sure the semantic losses don't overlap with the find losses + assert set(cur_losses).isdisjoint(set(cur_losses_semantic)) + cur_losses.update(cur_losses_semantic) + + # Optionally, normalize the loss by the number of find stages (training video frames) so that + # image batches and video batches have similar loss scales. (Otherwise video batches would + # have a much higher loss scale due to summing the losses over all the find stages.) + if self.normalize_by_stage_num: + cur_losses[CORE_LOSS_KEY] /= len(find_stages) + + if self.scale_by_find_batch_size: + bs = targets["num_boxes"].shape[0] + # sqrt scaling based on the "effective" batch size + cur_losses[CORE_LOSS_KEY] *= bs**0.5 + + for k, v in cur_losses.items(): + if k not in total_losses: + total_losses[k] = v + else: + total_losses[k] += v + + return total_losses diff --git a/detect_tools/sam3/sam3/train/loss/sigmoid_focal_loss.py b/detect_tools/sam3/sam3/train/loss/sigmoid_focal_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..15e6db43d48488f35d5ba28ea13d47348315ff46 --- /dev/null +++ b/detect_tools/sam3/sam3/train/loss/sigmoid_focal_loss.py @@ -0,0 +1,321 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +"""Triton kernel for faster and memory efficient sigmoid focal loss""" + +import torch +import triton +import triton.language as tl +from torch._inductor.runtime.triton_helpers import libdevice + +""" + +The sigmoid focal loss is defined as: + + prob = inputs.sigmoid() + ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") + p_t = prob * targets + (1 - prob) * (1 - targets) + alpha_t = alpha * targets + (1 - alpha) * (1 - targets) + loss = alpha_t * ce_loss * ((1 - p_t) ** gamma) + +Where alpha and gamma are scalar parameters, inputs are the logits, targets the float targets. 
+ +We implement two versions of the sigmoid focal loss: with and without sum reduction. +The latter is implemented with a built-in reduction to avoid materializing the full per-element output of the loss. +This can help save a bit of peak memory. + +The reduction version is implemented using somewhat of a hack. PyTorch's generated kernels usually do the point-wise operation in a first kernel, and implement the reduction in another kernel launched on a grid of size 1, where the reduction happens as a for loop in the triton kernel. +Since we want to fuse those two kernels, that is not a good idea: we'd have to launch the overall kernel on a grid of size 1, which is obviously inefficient. +On the other hand, typical CUDA algorithms for reduction (e.g. a reduction tree) are hard to implement in triton due to the lack of thread sync primitives. +We settle for a version that abuses triton's atomic_add: we could have all threads simply add to the same location. +In practice, this is not good, since it creates a massive bottleneck on the semaphore for that single memory location. So instead, we create M reduction locations. Each thread simply writes to thread_id % M, and the Python code finally sums over the M reductions. +M = 32 works fine in benchmarking tests. The forward is a tiny bit slower compared to the non-reduced kernel, but the backward breaks even due to one less memory allocation. +""" + + +@triton.jit +def _inner_focal_loss_fwd(inputs, targets, alpha, gamma): + inv_targets = 1 - targets + # Sigmoid + sig = tl.sigmoid(inputs) + + # Binary cross entropy with logits + # In practice, we want the following: + # bce_loss = -targets * tl.log(sig) - (1 - targets) * tl.log(1 - sig) + # However, the above is not numerically stable. + # We're also not directly taking the sum here, so the usual log-sum-exp trick doesn't apply. + # The bce can be reformulated, after algebraic manipulation, to + # bce_loss = log(1 + exp(-x)) + x * (1 - y) + # This is still not stable, because for large (-x) the exponential will blow up. + # We'll use the following alternate formulation: + # bce_loss = max(x, 0) - x * y + log(1 + exp(-abs(x))) + # Let's show that it's equivalent: + # Case x >= 0: abs(x) = x, max(x, 0) = x + # so we get x - x * y + log(1 + exp(-x)), which is equivalent + # Case x < 0: abs(x) = -x, max(x, 0) = 0 + # we have log(1 + exp(-abs(x))) = log(1 + exp(x)) = log(exp(x)(1 + exp(-x))) = x + log(1 + exp(-x)) + # plugging it in, we get + # 0 - x * y + x + log(1 + exp(-x)), which is also equivalent + # Note that this is stable because now the exponents are guaranteed to be below 0.
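+ # Quick sanity check of the stable formulation: at x = 0, y = 1 it gives max(0, 0) - 0 + log(2) ~= 0.693, + # which matches the reference -log(sigmoid(0)) = log(2).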
+ max_val = tl.clamp(inputs, min=0, max=1e9) + bce_loss = max_val - inputs * targets + tl.log(1 + tl.exp(-tl.abs(inputs))) + + # Modulating factor + p_t = sig * targets + (1 - sig) * inv_targets + mod_factor = libdevice.pow(1 - p_t, gamma) + + # Alpha factor + alpha_t = alpha * targets + (1 - alpha) * inv_targets + + # Final loss calculation + return alpha_t * mod_factor * bce_loss + + +# Non-reduced version +@triton.jit +def sigmoid_focal_loss_fwd_kernel( + inputs_ptr, + targets_ptr, + loss_ptr, + alpha: float, + gamma: float, + n_elements: int, + BLOCK_SIZE: tl.constexpr, +): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offset = block_start + tl.arange(0, BLOCK_SIZE) + mask = offset < n_elements + + # Load data + inputs = tl.load(inputs_ptr + offset, mask=mask).to(tl.float32) + targets = tl.load(targets_ptr + offset, mask=mask) + + final_loss = _inner_focal_loss_fwd(inputs, targets, alpha, gamma) + + # Store result + tl.store(loss_ptr + offset, final_loss, mask=mask) + + +# version with reduction +@triton.jit +def sigmoid_focal_loss_fwd_kernel_reduce( + inputs_ptr, + targets_ptr, + loss_ptr, + alpha: float, + gamma: float, + n_elements: int, + BLOCK_SIZE: tl.constexpr, + REDUCE_SIZE: tl.constexpr, +): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + reduce_loc = pid % REDUCE_SIZE + offset = block_start + tl.arange(0, BLOCK_SIZE) + mask = offset < n_elements + # Load data + inputs = tl.load(inputs_ptr + offset, mask=mask).to(tl.float32) + targets = tl.load(targets_ptr + offset, mask=mask) + + final_loss = _inner_focal_loss_fwd(inputs, targets, alpha, gamma) * mask + + fl = tl.sum(final_loss) + + # Store result + tl.atomic_add(loss_ptr + reduce_loc, fl) + + +@triton.jit +def _inner_focal_loss_bwd(inputs, targets, alpha, gamma): + inv_targets = 1 - targets + + # Recompute forward + max_val = tl.clamp(inputs, min=0, max=1e9) + bce_loss = max_val - inputs * targets + tl.log(1 + tl.exp(-tl.abs(inputs))) + + # Sigmoid + sig = tl.sigmoid(inputs) + inv_sig = 1 - sig + + # Modulating factor + p_t = sig * targets + inv_sig * inv_targets + tmp = libdevice.pow(1 - p_t, gamma - 1) + mod_factor = tmp * (1 - p_t) + + # Alpha factor + alpha_t = alpha * targets + (1 - alpha) * inv_targets + + # Now computing the derivatives + d_pt = (2 * targets - 1) * sig * inv_sig + d_mod_factor = -gamma * d_pt * tmp + + d_bce_loss = sig - targets + + return alpha_t * (d_bce_loss * mod_factor + d_mod_factor * bce_loss) + + +@triton.jit +def sigmoid_focal_loss_bwd_kernel( + inputs_ptr, + targets_ptr, + grad_inputs_ptr, + grad_out_ptr, + alpha: float, + gamma: float, + n_elements: int, + BLOCK_SIZE: tl.constexpr, +): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offset = block_start + tl.arange(0, BLOCK_SIZE) + mask = offset < n_elements + input_ptrs = inputs_ptr + offset + target_ptrs = targets_ptr + offset + grad_input_ptrs = grad_inputs_ptr + offset + grad_out_ptrs = grad_out_ptr + offset + # Load data + inputs = tl.load(input_ptrs, mask=mask).to(tl.float32) + targets = tl.load(target_ptrs, mask=mask) + grad_out = tl.load(grad_out_ptrs, mask=mask) + d_loss = grad_out * _inner_focal_loss_bwd(inputs, targets, alpha, gamma) + tl.store(grad_input_ptrs, d_loss, mask=mask) + + +@triton.jit +def sigmoid_focal_loss_bwd_kernel_reduce( + inputs_ptr, + targets_ptr, + grad_inputs_ptr, + grad_out_ptr, + alpha: float, + gamma: float, + n_elements: int, + BLOCK_SIZE: tl.constexpr, +): + # The only difference is that the gradient is now a single scalar + pid = 
tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offset = block_start + tl.arange(0, BLOCK_SIZE) + mask = offset < n_elements + input_ptrs = inputs_ptr + offset + target_ptrs = targets_ptr + offset + grad_input_ptrs = grad_inputs_ptr + offset + # Load data + inputs = tl.load(input_ptrs, mask=mask).to(tl.float32) + targets = tl.load(target_ptrs, mask=mask) + grad_out = tl.load(grad_out_ptr) + d_loss = grad_out * _inner_focal_loss_bwd(inputs, targets, alpha, gamma) + tl.store(grad_input_ptrs, d_loss, mask=mask) + + +class SigmoidFocalLoss(torch.autograd.Function): + BLOCK_SIZE = 256 + + @staticmethod + def forward(ctx, inputs, targets, alpha=0.25, gamma=2): + n_elements = inputs.numel() + assert targets.numel() == n_elements + input_shape = inputs.shape + inputs = inputs.view(-1).contiguous() + targets = targets.view(-1).contiguous() + loss = torch.empty(inputs.shape, dtype=torch.float32, device=inputs.device) + grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),) + sigmoid_focal_loss_fwd_kernel[grid]( + inputs, targets, loss, alpha, gamma, n_elements, SigmoidFocalLoss.BLOCK_SIZE + ) + ctx.save_for_backward(inputs.view(input_shape), targets.view(input_shape)) + ctx.alpha = alpha + ctx.gamma = gamma + return loss.view(input_shape) + + @staticmethod + def backward(ctx, grad_output): + inputs, targets = ctx.saved_tensors + alpha = ctx.alpha + gamma = ctx.gamma + n_elements = inputs.numel() + input_shape = inputs.shape + grad_inputs = torch.empty( + inputs.shape, dtype=grad_output.dtype, device=grad_output.device + ) + inputs_ptr = inputs.view(-1).contiguous() + targets_ptr = targets.view(-1).contiguous() + grad_output_ptr = grad_output.view(-1).contiguous() + grad_inputs_ptr = grad_inputs + assert grad_output.numel() == n_elements + grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),) + sigmoid_focal_loss_bwd_kernel[grid]( + inputs_ptr, + targets_ptr, + grad_inputs_ptr, + grad_output_ptr, + alpha, + gamma, + n_elements, + SigmoidFocalLoss.BLOCK_SIZE, + ) + return grad_inputs.view(input_shape), None, None, None + + +triton_sigmoid_focal_loss = SigmoidFocalLoss.apply + + +class SigmoidFocalLossReduced(torch.autograd.Function): + BLOCK_SIZE = 256 + REDUCE_SIZE = 32 + + @staticmethod + def forward(ctx, inputs, targets, alpha=0.25, gamma=2): + n_elements = inputs.numel() + input_shape = inputs.shape + inputs = inputs.view(-1).contiguous() + targets = targets.view(-1).contiguous() + loss = torch.zeros( + SigmoidFocalLossReduced.REDUCE_SIZE, + device=inputs.device, + dtype=torch.float32, + ) + grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),) + sigmoid_focal_loss_fwd_kernel_reduce[grid]( + inputs, + targets, + loss, + alpha, + gamma, + n_elements, + SigmoidFocalLossReduced.BLOCK_SIZE, + SigmoidFocalLossReduced.REDUCE_SIZE, + ) + ctx.save_for_backward(inputs.view(input_shape), targets.view(input_shape)) + ctx.alpha = alpha + ctx.gamma = gamma + return loss.sum() + + @staticmethod + def backward(ctx, grad_output): + inputs, targets = ctx.saved_tensors + alpha = ctx.alpha + gamma = ctx.gamma + n_elements = inputs.numel() + input_shape = inputs.shape + grad_inputs = torch.empty( + inputs.shape, dtype=grad_output.dtype, device=grad_output.device + ) + inputs_ptr = inputs.view(-1).contiguous() + targets_ptr = targets.reshape(-1).contiguous() + assert grad_output.numel() == 1 + grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),) + sigmoid_focal_loss_bwd_kernel_reduce[grid]( + inputs_ptr, + targets_ptr, + grad_inputs, + grad_output, + alpha, 
+ gamma, + n_elements, + SigmoidFocalLossReduced.BLOCK_SIZE, + ) + return grad_inputs.view(input_shape), None, None, None + + +triton_sigmoid_focal_loss_reduce = SigmoidFocalLossReduced.apply diff --git a/detect_tools/sam3/sam3/train/masks_ops.py b/detect_tools/sam3/sam3/train/masks_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..f9d2fd7c23396722a738a92393b92621943708dd --- /dev/null +++ b/detect_tools/sam3/sam3/train/masks_ops.py @@ -0,0 +1,272 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +"""Utilities for masks manipulation""" + +import numpy as np +import pycocotools.mask as maskUtils +import torch +from pycocotools import mask as mask_util + + +def instance_masks_to_semantic_masks( + instance_masks: torch.Tensor, num_instances: torch.Tensor +) -> torch.Tensor: + """This function converts instance masks to semantic masks. + It accepts a collapsed batch of instances masks (ie all instance masks are concatenated in a single tensor) and + the number of instances in each image of the batch. + It returns a mask with the same spatial dimensions as the input instance masks, where for each batch element the + semantic mask is the union of all the instance masks in the batch element. + + If for a given batch element there are no instances (ie num_instances[i]==0), the corresponding semantic mask will be a tensor of zeros. + + Args: + instance_masks (torch.Tensor): A tensor of shape (N, H, W) where N is the number of instances in the batch. + num_instances (torch.Tensor): A tensor of shape (B,) where B is the batch size. It contains the number of instances + in each image of the batch. + + Returns: + torch.Tensor: A tensor of shape (B, H, W) where B is the batch size and H, W are the spatial dimensions of the + input instance masks. 
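+ + For example, with num_instances = torch.tensor([2, 1]) and instance_masks of shape (3, H, W), the + first two instance masks are OR-ed into the semantic mask of the first image and the third mask + becomes the semantic mask of the second image.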
+ """ + + masks_per_query = torch.split(instance_masks, num_instances.tolist()) + + return torch.stack([torch.any(masks, dim=0) for masks in masks_per_query], dim=0) + + +def mask_intersection(masks1, masks2, block_size=16): + """Compute the intersection of two sets of masks, without blowing the memory""" + + assert masks1.shape[1:] == masks2.shape[1:] + assert masks1.dtype == torch.bool and masks2.dtype == torch.bool + + result = torch.zeros( + masks1.shape[0], masks2.shape[0], device=masks1.device, dtype=torch.long + ) + for i in range(0, masks1.shape[0], block_size): + for j in range(0, masks2.shape[0], block_size): + intersection = ( + (masks1[i : i + block_size, None] * masks2[None, j : j + block_size]) + .flatten(-2) + .sum(-1) + ) + result[i : i + block_size, j : j + block_size] = intersection + return result + + +def mask_iom(masks1, masks2): + """ + Similar to IoU, except the denominator is the area of the smallest mask + """ + assert masks1.shape[1:] == masks2.shape[1:] + assert masks1.dtype == torch.bool and masks2.dtype == torch.bool + + # intersection = (masks1[:, None] * masks2[None]).flatten(-2).sum(-1) + intersection = mask_intersection(masks1, masks2) + area1 = masks1.flatten(-2).sum(-1) + area2 = masks2.flatten(-2).sum(-1) + min_area = torch.min(area1[:, None], area2[None, :]) + return intersection / (min_area + 1e-8) + + +def compute_boundary(seg): + """ + Adapted from https://github.com/JonathonLuiten/TrackEval/blob/master/trackeval/metrics/j_and_f.py#L148 + Return a 1pix wide boundary of the given mask + """ + assert seg.ndim >= 2 + e = torch.zeros_like(seg) + s = torch.zeros_like(seg) + se = torch.zeros_like(seg) + + e[..., :, :-1] = seg[..., :, 1:] + s[..., :-1, :] = seg[..., 1:, :] + se[..., :-1, :-1] = seg[..., 1:, 1:] + + b = seg ^ e | seg ^ s | seg ^ se + b[..., -1, :] = seg[..., -1, :] ^ e[..., -1, :] + b[..., :, -1] = seg[..., :, -1] ^ s[..., :, -1] + b[..., -1, -1] = 0 + return b + + +def dilation(mask, kernel_size): + """ + Implements the dilation operation. If the input is on cpu, we call the cv2 version. 
+ Otherwise, we implement it using a convolution + + The kernel is assumed to be a square kernel + + """ + + assert mask.ndim == 3 + kernel_size = int(kernel_size) + assert ( + kernel_size % 2 == 1 + ), f"Dilation expects a odd kernel size, got {kernel_size}" + + if mask.is_cuda: + m = mask.unsqueeze(1).to(torch.float16) + k = torch.ones(1, 1, kernel_size, 1, dtype=m.dtype, device=m.device) + + result = torch.nn.functional.conv2d(m, k, padding="same") + result = torch.nn.functional.conv2d(result, k.transpose(-1, -2), padding="same") + return result.view_as(mask) > 0 + + all_masks = mask.view(-1, mask.size(-2), mask.size(-1)).numpy().astype(np.uint8) + kernel = np.ones((kernel_size, kernel_size), dtype=np.uint8) + + import cv2 + + processed = [torch.from_numpy(cv2.dilate(m, kernel)) for m in all_masks] + return torch.stack(processed).view_as(mask).to(mask) + + +def compute_F_measure( + gt_boundary_rle, gt_dilated_boundary_rle, dt_boundary_rle, dt_dilated_boundary_rle +): + """Adapted from https://github.com/JonathonLuiten/TrackEval/blob/master/trackeval/metrics/j_and_f.py#L207 + + Assumes the boundary and dilated boundaries have already been computed and converted to RLE + """ + gt_match = maskUtils.merge([gt_boundary_rle, dt_dilated_boundary_rle], True) + dt_match = maskUtils.merge([dt_boundary_rle, gt_dilated_boundary_rle], True) + + n_dt = maskUtils.area(dt_boundary_rle) + n_gt = maskUtils.area(gt_boundary_rle) + # % Compute precision and recall + if n_dt == 0 and n_gt > 0: + precision = 1 + recall = 0 + elif n_dt > 0 and n_gt == 0: + precision = 0 + recall = 1 + elif n_dt == 0 and n_gt == 0: + precision = 1 + recall = 1 + else: + precision = maskUtils.area(dt_match) / float(n_dt) + recall = maskUtils.area(gt_match) / float(n_gt) + + # Compute F measure + if precision + recall == 0: + f_val = 0 + else: + f_val = 2 * precision * recall / (precision + recall) + + return f_val + + +@torch.no_grad() +def rle_encode(orig_mask, return_areas=False): + """Encodes a collection of masks in RLE format + + This function emulates the behavior of the COCO API's encode function, but + is executed partially on the GPU for faster execution. + + Args: + mask (torch.Tensor): A mask of shape (N, H, W) with dtype=torch.bool + return_areas (bool): If True, add the areas of the masks as a part of + the RLE output dict under the "area" key. Default is False. + + Returns: + str: The RLE encoded masks + """ + assert orig_mask.ndim == 3, "Mask must be of shape (N, H, W)" + assert orig_mask.dtype == torch.bool, "Mask must have dtype=torch.bool" + + if orig_mask.numel() == 0: + return [] + + # First, transpose the spatial dimensions. 
+ # This is necessary because the COCO API uses Fortran order + mask = orig_mask.transpose(1, 2) + + # Flatten the mask + flat_mask = mask.reshape(mask.shape[0], -1) + if return_areas: + mask_areas = flat_mask.sum(-1).tolist() + # Find the indices where the mask changes + differences = torch.ones( + mask.shape[0], flat_mask.shape[1] + 1, device=mask.device, dtype=torch.bool + ) + differences[:, 1:-1] = flat_mask[:, :-1] != flat_mask[:, 1:] + differences[:, 0] = flat_mask[:, 0] + _, change_indices = torch.where(differences) + + try: + boundaries = torch.cumsum(differences.sum(-1), 0).cpu() + except RuntimeError as _: + boundaries = torch.cumsum(differences.cpu().sum(-1), 0) + + change_indices_clone = change_indices.clone() + # First pass computes the RLEs on GPU, in a flatten format + for i in range(mask.shape[0]): + # Get the change indices for this batch item + beg = 0 if i == 0 else boundaries[i - 1].item() + end = boundaries[i].item() + change_indices[beg + 1 : end] -= change_indices_clone[beg : end - 1] + + # Now we can split the RLES of each batch item, and convert them to strings + # No more gpu at this point + change_indices = change_indices.tolist() + + batch_rles = [] + # Process each mask in the batch separately + for i in range(mask.shape[0]): + beg = 0 if i == 0 else boundaries[i - 1].item() + end = boundaries[i].item() + run_lengths = change_indices[beg:end] + + uncompressed_rle = {"counts": run_lengths, "size": list(orig_mask.shape[1:])} + h, w = uncompressed_rle["size"] + rle = mask_util.frPyObjects(uncompressed_rle, h, w) + rle["counts"] = rle["counts"].decode("utf-8") + if return_areas: + rle["area"] = mask_areas[i] + batch_rles.append(rle) + + return batch_rles + + +def robust_rle_encode(masks): + """Encodes a collection of masks in RLE format. Uses the gpu version fist, falls back to the cpu version if it fails""" + + assert masks.ndim == 3, "Mask must be of shape (N, H, W)" + assert masks.dtype == torch.bool, "Mask must have dtype=torch.bool" + + try: + return rle_encode(masks) + except RuntimeError as _: + masks = masks.cpu().numpy() + rles = [ + mask_util.encode( + np.array(mask[:, :, np.newaxis], dtype=np.uint8, order="F") + )[0] + for mask in masks + ] + for rle in rles: + rle["counts"] = rle["counts"].decode("utf-8") + return rles + + +def ann_to_rle(segm, im_info): + """Convert annotation which can be polygons, uncompressed RLE to RLE. + Args: + ann (dict) : annotation object + Returns: + ann (rle) + """ + h, w = im_info["height"], im_info["width"] + if isinstance(segm, list): + # polygon -- a single object might consist of multiple parts + # we merge all parts into one mask rle code + rles = mask_util.frPyObjects(segm, h, w) + rle = mask_util.merge(rles) + elif isinstance(segm["counts"], list): + # uncompressed RLE + rle = mask_util.frPyObjects(segm, h, w) + else: + # rle + rle = segm + return rle diff --git a/detect_tools/sam3/sam3/train/matcher.py b/detect_tools/sam3/sam3/train/matcher.py new file mode 100644 index 0000000000000000000000000000000000000000..b0b8d62a5279c0694fa079d004269f50996f76a1 --- /dev/null +++ b/detect_tools/sam3/sam3/train/matcher.py @@ -0,0 +1,806 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +""" +Modules to compute the matching cost and solve the corresponding LSAP. 
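+ +The matchers here broadly follow the DETR-style recipe: a cost matrix combining classification, L1 box and +GIoU terms is built for each (prediction, target) pair, and the assignment is solved with scipy's +linear_sum_assignment.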
+""" + +import numpy as np +import torch + +from sam3.model.box_ops import box_cxcywh_to_xyxy, box_iou, generalized_box_iou +from scipy.optimize import linear_sum_assignment +from torch import nn + + +def _do_matching(cost, repeats=1, return_tgt_indices=False, do_filtering=False): + if repeats > 1: + cost = np.tile(cost, (1, repeats)) + + i, j = linear_sum_assignment(cost) + if do_filtering: + # filter out invalid entries (i.e. those with cost > 1e8) + valid_thresh = 1e8 + valid_ijs = [(ii, jj) for ii, jj in zip(i, j) if cost[ii, jj] < valid_thresh] + i, j = zip(*valid_ijs) if len(valid_ijs) > 0 else ([], []) + i, j = np.array(i, dtype=np.int64), np.array(j, dtype=np.int64) + if return_tgt_indices: + return i, j + order = np.argsort(j) + return i[order] + + +class HungarianMatcher(nn.Module): + """This class computes an assignment between the targets and the predictions of the network + + For efficiency reasons, the targets don't include the no_object. Because of this, in general, + there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, + while the others are un-matched (and thus treated as non-objects). + """ + + def __init__( + self, + cost_class: float = 1, + cost_bbox: float = 1, + cost_giou: float = 1, + focal_loss: bool = False, + focal_alpha: float = 0.25, + focal_gamma: float = 2, + ): + """Creates the matcher + + Params: + cost_class: This is the relative weight of the classification error in the matching cost + cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost + cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost + """ + super().__init__() + self.cost_class = cost_class + self.cost_bbox = cost_bbox + self.cost_giou = cost_giou + self.norm = nn.Sigmoid() if focal_loss else nn.Softmax(-1) + assert ( + cost_class != 0 or cost_bbox != 0 or cost_giou != 0 + ), "all costs cant be 0" + self.focal_loss = focal_loss + self.focal_alpha = focal_alpha + self.focal_gamma = focal_gamma + + @torch.no_grad() + def forward(self, outputs, batched_targets): + """Performs the matching + + Params: + outputs: This is a dict that contains at least these entries: + "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits + "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates + + targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing: + "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth + objects in the target) containing the class labels + "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates + + Returns: + A list of size batch_size, containing tuples of (index_i, index_j) where: + - index_i is the indices of the selected predictions (in order) + - index_j is the indices of the corresponding selected targets (in order) + For each batch element, it holds: + len(index_i) = len(index_j) = min(num_queries, num_target_boxes) + """ + bs, num_queries = outputs["pred_logits"].shape[:2] + + # We flatten to compute the cost matrices in a batch + out_prob = self.norm( + outputs["pred_logits"].flatten(0, 1) + ) # [batch_size * num_queries, num_classes] + out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] + + # Also concat the target labels and boxes + tgt_bbox = batched_targets["boxes"] + + if "positive_map" in batched_targets: + # In 
this case we have a multi-hot target + positive_map = batched_targets["positive_map"] + assert len(tgt_bbox) == len(positive_map) + + if self.focal_loss: + positive_map = positive_map > 1e-4 + alpha = self.focal_alpha + gamma = self.focal_gamma + neg_cost_class = ( + (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log()) + ) + pos_cost_class = ( + alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log()) + ) + cost_class = ( + (pos_cost_class - neg_cost_class).unsqueeze(1) + * positive_map.unsqueeze(0) + ).sum(-1) + else: + # Compute the soft-cross entropy between the predicted token alignment and the GT one for each box + cost_class = -(out_prob.unsqueeze(1) * positive_map.unsqueeze(0)).sum( + -1 + ) + else: + # In this case we are doing a "standard" cross entropy + tgt_ids = batched_targets["labels"] + assert len(tgt_bbox) == len(tgt_ids) + + if self.focal_loss: + alpha = self.focal_alpha + gamma = self.focal_gamma + neg_cost_class = ( + (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log()) + ) + pos_cost_class = ( + alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log()) + ) + cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids] + else: + # Compute the classification cost. Contrary to the loss, we don't use the NLL, + # but approximate it in 1 - proba[target class]. + # The 1 is a constant that doesn't change the matching, it can be omitted. + cost_class = -out_prob[:, tgt_ids] + + # Compute the L1 cost between boxes + cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1) + assert cost_class.shape == cost_bbox.shape + + # Compute the giou cost betwen boxes + cost_giou = -generalized_box_iou( + box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox) + ) + + # Final cost matrix + C = ( + self.cost_bbox * cost_bbox + + self.cost_class * cost_class + + self.cost_giou * cost_giou + ) + C = C.view(bs, num_queries, -1).cpu().numpy() + + sizes = torch.cumsum(batched_targets["num_boxes"], -1)[:-1] + costs = [c[i] for i, c in enumerate(np.split(C, sizes.cpu().numpy(), axis=-1))] + indices = [_do_matching(c) for c in costs] + batch_idx = torch.as_tensor( + sum([[i] * len(src) for i, src in enumerate(indices)], []), dtype=torch.long + ) + src_idx = torch.from_numpy(np.concatenate(indices)).long() + return batch_idx, src_idx + + +class BinaryHungarianMatcher(nn.Module): + """This class computes an assignment between the targets and the predictions of the network + + For efficiency reasons, the targets don't include the no_object. Because of this, in general, + there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, + while the others are un-matched (and thus treated as non-objects). 
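+
+    Hypothetical usage sketch (tensor shapes follow the forward() docstring; the cost
+    weights are illustrative choices, not prescribed defaults):
+
+        matcher = BinaryHungarianMatcher(cost_class=2.0, cost_bbox=5.0, cost_giou=2.0)
+        outputs = {
+            "pred_logits": torch.randn(2, 100, 1),
+            "pred_boxes": torch.rand(2, 100, 4),  # cxcywh, normalized
+        }
+        targets = {"boxes": torch.rand(7, 4), "num_boxes": torch.tensor([3, 4])}
+        batch_idx, src_idx, tgt_idx = matcher(outputs, targets)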
+ """ + + def __init__( + self, + cost_class: float = 1, + cost_bbox: float = 1, + cost_giou: float = 1, + ): + """Creates the matcher + + Params: + cost_class: This is the relative weight of the classification error in the matching cost + cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost + cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost + """ + super().__init__() + self.cost_class = cost_class + self.cost_bbox = cost_bbox + self.cost_giou = cost_giou + self.norm = nn.Sigmoid() + assert ( + cost_class != 0 or cost_bbox != 0 or cost_giou != 0 + ), "all costs cant be 0" + + @torch.no_grad() + def forward(self, outputs, batched_targets, repeats=0, repeat_batch=1): + """Performs the matching + + Params: + outputs: This is a dict that contains at least these entries: + "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits + "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates + + targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing: + "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth + objects in the target) containing the class labels + "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates + + Returns: + A list of size batch_size, containing tuples of (index_i, index_j) where: + - index_i is the indices of the selected predictions (in order) + - index_j is the indices of the corresponding selected targets (in order) + For each batch element, it holds: + len(index_i) = len(index_j) = min(num_queries, num_target_boxes) + """ + if repeat_batch != 1: + raise NotImplementedError("please use BinaryHungarianMatcherV2 instead") + + bs, num_queries = outputs["pred_logits"].shape[:2] + + # We flatten to compute the cost matrices in a batch + out_prob = self.norm(outputs["pred_logits"].flatten(0, 1)).squeeze( + -1 + ) # [batch_size * num_queries] + out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] + + # Also concat the target labels and boxes + tgt_bbox = batched_targets["boxes"] + + # Compute the L1 cost between boxes + cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1) + + cost_class = -out_prob.unsqueeze(-1).expand_as(cost_bbox) + + assert cost_class.shape == cost_bbox.shape + + # Compute the giou cost betwen boxes + cost_giou = -generalized_box_iou( + box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox) + ) + + # Final cost matrix + C = ( + self.cost_bbox * cost_bbox + + self.cost_class * cost_class + + self.cost_giou * cost_giou + ) + C = C.view(bs, num_queries, -1).cpu().numpy() + + sizes = torch.cumsum(batched_targets["num_boxes"], -1)[:-1] + costs = [c[i] for i, c in enumerate(np.split(C, sizes.cpu().numpy(), axis=-1))] + return_tgt_indices = False + for c in costs: + n_targ = c.shape[1] + if repeats > 1: + n_targ *= repeats + if c.shape[0] < n_targ: + return_tgt_indices = True + break + if return_tgt_indices: + indices, tgt_indices = zip( + *( + _do_matching( + c, repeats=repeats, return_tgt_indices=return_tgt_indices + ) + for c in costs + ) + ) + tgt_indices = list(tgt_indices) + for i in range(1, len(tgt_indices)): + tgt_indices[i] += sizes[i - 1].item() + tgt_idx = torch.from_numpy(np.concatenate(tgt_indices)).long() + else: + indices = [_do_matching(c, repeats=repeats) for c in costs] + tgt_idx = None + + batch_idx = torch.as_tensor( + sum([[i] * 
len(src) for i, src in enumerate(indices)], []), dtype=torch.long + ) + src_idx = torch.from_numpy(np.concatenate(indices)).long() + return batch_idx, src_idx, tgt_idx + + +class BinaryFocalHungarianMatcher(nn.Module): + """This class computes an assignment between the targets and the predictions of the network + + For efficiency reasons, the targets don't include the no_object. Because of this, in general, + there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, + while the others are un-matched (and thus treated as non-objects). + """ + + def __init__( + self, + cost_class: float = 1, + cost_bbox: float = 1, + cost_giou: float = 1, + alpha: float = 0.25, + gamma: float = 2.0, + stable: bool = False, + ): + """Creates the matcher + + Params: + cost_class: This is the relative weight of the classification error in the matching cost + cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost + cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost + """ + super().__init__() + self.cost_class = cost_class + self.cost_bbox = cost_bbox + self.cost_giou = cost_giou + self.norm = nn.Sigmoid() + self.alpha = alpha + self.gamma = gamma + self.stable = stable + assert ( + cost_class != 0 or cost_bbox != 0 or cost_giou != 0 + ), "all costs cant be 0" + + @torch.no_grad() + def forward(self, outputs, batched_targets, repeats=1, repeat_batch=1): + """Performs the matching + + Params: + outputs: This is a dict that contains at least these entries: + "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits + "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates + + targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing: + "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth + objects in the target) containing the class labels + "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates + + Returns: + A list of size batch_size, containing tuples of (index_i, index_j) where: + - index_i is the indices of the selected predictions (in order) + - index_j is the indices of the corresponding selected targets (in order) + For each batch element, it holds: + len(index_i) = len(index_j) = min(num_queries, num_target_boxes) + """ + if repeat_batch != 1: + raise NotImplementedError("please use BinaryHungarianMatcherV2 instead") + + bs, num_queries = outputs["pred_logits"].shape[:2] + + # We flatten to compute the cost matrices in a batch + out_score = outputs["pred_logits"].flatten(0, 1).squeeze(-1) + out_prob = self.norm(out_score) # [batch_size * num_queries] + out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] + + # Also concat the target labels and boxes + tgt_bbox = batched_targets["boxes"] + + # Compute the L1 cost between boxes + cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1) + + # Compute the giou cost betwen boxes + cost_giou = -generalized_box_iou( + box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox) + ) + + # cost_class = -out_prob.unsqueeze(-1).expand_as(cost_bbox) + if self.stable: + rescaled_giou = (-cost_giou + 1) / 2 + out_prob = out_prob.unsqueeze(-1).expand_as(cost_bbox) * rescaled_giou + cost_class = -self.alpha * (1 - out_prob) ** self.gamma * torch.log( + out_prob + ) + (1 - self.alpha) * out_prob**self.gamma * torch.log(1 - 
out_prob) + else: + # directly computing log sigmoid (more numerically stable) + log_out_prob = torch.nn.functional.logsigmoid(out_score) + log_one_minus_out_prob = torch.nn.functional.logsigmoid(-out_score) + cost_class = ( + -self.alpha * (1 - out_prob) ** self.gamma * log_out_prob + + (1 - self.alpha) * out_prob**self.gamma * log_one_minus_out_prob + ) + if not self.stable: + cost_class = cost_class.unsqueeze(-1).expand_as(cost_bbox) + + assert cost_class.shape == cost_bbox.shape + + # Final cost matrix + C = ( + self.cost_bbox * cost_bbox + + self.cost_class * cost_class + + self.cost_giou * cost_giou + ) + C = C.view(bs, num_queries, -1).cpu().numpy() + + sizes = torch.cumsum(batched_targets["num_boxes"], -1)[:-1] + costs = [c[i] for i, c in enumerate(np.split(C, sizes.cpu().numpy(), axis=-1))] + return_tgt_indices = False + for c in costs: + n_targ = c.shape[1] + if repeats > 1: + n_targ *= repeats + if c.shape[0] < n_targ: + return_tgt_indices = True + break + if return_tgt_indices: + indices, tgt_indices = zip( + *( + _do_matching( + c, repeats=repeats, return_tgt_indices=return_tgt_indices + ) + for c in costs + ) + ) + tgt_indices = list(tgt_indices) + for i in range(1, len(tgt_indices)): + tgt_indices[i] += sizes[i - 1].item() + tgt_idx = torch.from_numpy(np.concatenate(tgt_indices)).long() + else: + indices = [_do_matching(c, repeats=repeats) for c in costs] + tgt_idx = None + + batch_idx = torch.as_tensor( + sum([[i] * len(src) for i, src in enumerate(indices)], []), dtype=torch.long + ) + src_idx = torch.from_numpy(np.concatenate(indices)).long() + return batch_idx, src_idx, tgt_idx + + +class BinaryHungarianMatcherV2(nn.Module): + """ + This class computes an assignment between the targets and the predictions + of the network + + For efficiency reasons, the targets don't include the no_object. Because of + this, in general, there are more predictions than targets. In this case, we + do a 1-to-1 matching of the best predictions, while the others are + un-matched (and thus treated as non-objects). + + This is a more efficient implementation of BinaryHungarianMatcher. + """ + + def __init__( + self, + cost_class: float = 1, + cost_bbox: float = 1, + cost_giou: float = 1, + focal: bool = False, + alpha: float = 0.25, + gamma: float = 2.0, + stable: bool = False, + remove_samples_with_0_gt: bool = True, + ): + """ + Creates the matcher + + Params: + - cost_class: Relative weight of the classification error in the + matching cost + - cost_bbox: Relative weight of the L1 error of the bounding box + coordinates in the matching cost + - cost_giou: This is the relative weight of the giou loss of the + bounding box in the matching cost + """ + super().__init__() + self.cost_class = cost_class + self.cost_bbox = cost_bbox + self.cost_giou = cost_giou + self.norm = nn.Sigmoid() + assert ( + cost_class != 0 or cost_bbox != 0 or cost_giou != 0 + ), "all costs cant be 0" + self.focal = focal + if focal: + self.alpha = alpha + self.gamma = gamma + self.stable = stable + self.remove_samples_with_0_gt = remove_samples_with_0_gt + + @torch.no_grad() + def forward( + self, + outputs, + batched_targets, + repeats=1, + repeat_batch=1, + out_is_valid=None, + target_is_valid_padded=None, + ): + """ + Performs the matching. The inputs and outputs are the same as + BinaryHungarianMatcher.forward, except for the optional cached_padded + flag and the optional "_boxes_padded" entry of batched_targets. 
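+
+        Hypothetical usage sketch (unlike BinaryHungarianMatcher, targets are consumed
+        in padded form through "boxes_padded"; see the Inputs section below):
+
+            matcher = BinaryHungarianMatcherV2(focal=True)
+            outputs = {
+                "pred_logits": torch.randn(2, 100, 1),
+                "pred_boxes": torch.rand(2, 100, 4),
+            }
+            targets = {
+                "num_boxes": torch.tensor([2, 0]),
+                "boxes_padded": torch.rand(2, 2, 4),  # cxcywh, padded to max T_b
+            }
+            batch_idx, src_idx, tgt_idx = matcher(outputs, targets)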
+ + Inputs: + - outputs: A dict with the following keys: + - "pred_logits": Tensor of shape (batch_size, num_queries, 1) with + classification logits + - "pred_boxes": Tensor of shape (batch_size, num_queries, 4) with + predicted box coordinates in cxcywh format. + - batched_targets: A dict of targets. There may be a variable number of + targets per batch entry; suppose that there are T_b targets for batch + entry 0 <= b < batch_size. It should have the following keys: + - "boxes": Tensor of shape (sum_b T_b, 4) giving ground-truth boxes + in cxcywh format for all batch entries packed into a single tensor + - "num_boxes": int64 Tensor of shape (batch_size,) giving the number + of ground-truth boxes per batch entry: num_boxes[b] = T_b + - "_boxes_padded": Tensor of shape (batch_size, max_b T_b, 4) giving + a padded version of ground-truth boxes. If this is not present then + it will be computed from batched_targets["boxes"] instead, but + caching it here can improve performance for repeated calls with the + same targets. + - out_is_valid: If not None, it should be a boolean tensor of shape + (batch_size, num_queries) indicating which predictions are valid. + Invalid predictions are ignored during matching and won't appear in + the output indices. + - target_is_valid_padded: If not None, it should be a boolean tensor of + shape (batch_size, max_num_gt_boxes) in padded format indicating + which GT boxes are valid. Invalid GT boxes are ignored during matching + and won't appear in the output indices. + + Returns: + A list of size batch_size, containing tuples of (idx_i, idx_j): + - idx_i is the indices of the selected predictions (in order) + - idx_j is the indices of the corresponding selected targets + (in order) + For each batch element, it holds: + len(index_i) = len(index_j) + = min(num_queries, num_target_boxes) + """ + _, num_queries = outputs["pred_logits"].shape[:2] + + out_score = outputs["pred_logits"].squeeze(-1) # (B, Q) + out_bbox = outputs["pred_boxes"] # (B, Q, 4)) + + device = out_score.device + + num_boxes = batched_targets["num_boxes"].cpu() + # Get a padded version of target boxes (as precomputed in the collator). + # It should work for both repeat==1 (o2o) and repeat>1 (o2m) matching. 
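+        # Illustration of the padded layout assumed here: for num_boxes = [2, 1] the
+        # collator provides boxes_padded of shape (batch_size, 2, 4); boxes_padded[1, 1]
+        # is padding and is ignored downstream because only the first num_boxes[b]
+        # target columns of each cost matrix are kept.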
+ tgt_bbox = batched_targets["boxes_padded"] + if self.remove_samples_with_0_gt: + # keep only samples w/ at least 1 GT box in targets (num_boxes and tgt_bbox) + batch_keep = num_boxes > 0 + num_boxes = num_boxes[batch_keep] + tgt_bbox = tgt_bbox[batch_keep] + if target_is_valid_padded is not None: + target_is_valid_padded = target_is_valid_padded[batch_keep] + # Repeat the targets (for the case of batched aux outputs in the matcher) + if repeat_batch > 1: + # In this case, out_prob and out_bbox will be a concatenation of + # both final and auxiliary outputs, so we also repeat the targets + num_boxes = num_boxes.repeat(repeat_batch) + tgt_bbox = tgt_bbox.repeat(repeat_batch, 1, 1) + if target_is_valid_padded is not None: + target_is_valid_padded = target_is_valid_padded.repeat(repeat_batch, 1) + + # keep only samples w/ at least 1 GT box in outputs + if self.remove_samples_with_0_gt: + if repeat_batch > 1: + batch_keep = batch_keep.repeat(repeat_batch) + out_score = out_score[batch_keep] + out_bbox = out_bbox[batch_keep] + if out_is_valid is not None: + out_is_valid = out_is_valid[batch_keep] + assert out_bbox.shape[0] == tgt_bbox.shape[0] + assert out_bbox.shape[0] == num_boxes.shape[0] + + # Compute the L1 cost between boxes + cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1) + + # Compute the giou cost betwen boxes + cost_giou = -generalized_box_iou( + box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox) + ) + + out_prob = self.norm(out_score) + if not self.focal: + cost_class = -out_prob.unsqueeze(-1).expand_as(cost_bbox) + else: + if self.stable: + rescaled_giou = (-cost_giou + 1) / 2 + out_prob = out_prob.unsqueeze(-1).expand_as(cost_bbox) * rescaled_giou + cost_class = -self.alpha * (1 - out_prob) ** self.gamma * torch.log( + out_prob + ) + (1 - self.alpha) * out_prob**self.gamma * torch.log(1 - out_prob) + else: + # directly computing log sigmoid (more numerically stable) + log_out_prob = torch.nn.functional.logsigmoid(out_score) + log_one_minus_out_prob = torch.nn.functional.logsigmoid(-out_score) + cost_class = ( + -self.alpha * (1 - out_prob) ** self.gamma * log_out_prob + + (1 - self.alpha) * out_prob**self.gamma * log_one_minus_out_prob + ) + if not self.stable: + cost_class = cost_class.unsqueeze(-1).expand_as(cost_bbox) + + assert cost_class.shape == cost_bbox.shape + + # Final cost matrix + C = ( + self.cost_bbox * cost_bbox + + self.cost_class * cost_class + + self.cost_giou * cost_giou + ) + # assign a very high cost (1e9) to invalid outputs and targets, so that we can + # filter them out (in `_do_matching`) from bipartite matching results + do_filtering = out_is_valid is not None or target_is_valid_padded is not None + if out_is_valid is not None: + C = torch.where(out_is_valid[:, :, None], C, 1e9) + if target_is_valid_padded is not None: + C = torch.where(target_is_valid_padded[:, None, :], C, 1e9) + C = C.cpu().numpy() + costs = [C[i, :, :s] for i, s in enumerate(num_boxes.tolist())] + return_tgt_indices = ( + do_filtering or torch.any(num_queries < num_boxes * max(repeats, 1)).item() + ) + if len(costs) == 0: + # We have size 0 in the batch dimension, so we return empty matching indices + # (note that this can happen due to `remove_samples_with_0_gt=True` even if + # the original input batch size is not 0, when all queries have empty GTs). 
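+            # In that situation the indices returned below are empty LongTensors
+            # (and tgt_idx is an empty LongTensor whenever return_tgt_indices is set).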
+ indices = [] + tgt_idx = torch.zeros(0).long().to(device) if return_tgt_indices else None + elif return_tgt_indices: + indices, tgt_indices = zip( + *( + _do_matching( + c, + repeats=repeats, + return_tgt_indices=return_tgt_indices, + do_filtering=do_filtering, + ) + for c in costs + ) + ) + tgt_indices = list(tgt_indices) + sizes = torch.cumsum(num_boxes, -1)[:-1] + for i in range(1, len(tgt_indices)): + tgt_indices[i] += sizes[i - 1].item() + tgt_idx = torch.from_numpy(np.concatenate(tgt_indices)).long().to(device) + else: + indices = [ + _do_matching(c, repeats=repeats, do_filtering=do_filtering) + for c in costs + ] + tgt_idx = None + + if self.remove_samples_with_0_gt: + kept_inds = batch_keep.nonzero().squeeze(1) + batch_idx = torch.as_tensor( + sum([[kept_inds[i]] * len(src) for i, src in enumerate(indices)], []), + dtype=torch.long, + device=device, + ) + else: + batch_idx = torch.as_tensor( + sum([[i] * len(src) for i, src in enumerate(indices)], []), + dtype=torch.long, + device=device, + ) + + # indices could be an empty list (since we remove samples w/ 0 GT boxes) + if len(indices) > 0: + src_idx = torch.from_numpy(np.concatenate(indices)).long().to(device) + else: + src_idx = torch.empty(0, dtype=torch.long, device=device) + return batch_idx, src_idx, tgt_idx + + +class BinaryOneToManyMatcher(nn.Module): + """ + This class computes a greedy assignment between the targets and the predictions of the network. + In this formulation, several predictions can be assigned to each target, but each prediction can be assigned to + at most one target. + + See DAC-Detr for details + """ + + def __init__( + self, + alpha: float = 0.3, + threshold: float = 0.4, + topk: int = 6, + ): + """ + Creates the matcher + + Params: + alpha: relative balancing between classification and localization + threshold: threshold used to select positive predictions + topk: number of top scoring predictions to consider + """ + super().__init__() + self.norm = nn.Sigmoid() + self.alpha = alpha + self.threshold = threshold + self.topk = topk + + @torch.no_grad() + def forward( + self, + outputs, + batched_targets, + repeats=1, + repeat_batch=1, + out_is_valid=None, + target_is_valid_padded=None, + ): + """ + Performs the matching. The inputs and outputs are the same as + BinaryHungarianMatcher.forward + + Inputs: + - outputs: A dict with the following keys: + - "pred_logits": Tensor of shape (batch_size, num_queries, 1) with + classification logits + - "pred_boxes": Tensor of shape (batch_size, num_queries, 4) with + predicted box coordinates in cxcywh format. + - batched_targets: A dict of targets. There may be a variable number of + targets per batch entry; suppose that there are T_b targets for batch + entry 0 <= b < batch_size. It should have the following keys: + - "num_boxes": int64 Tensor of shape (batch_size,) giving the number + of ground-truth boxes per batch entry: num_boxes[b] = T_b + - "_boxes_padded": Tensor of shape (batch_size, max_b T_b, 4) giving + a padded version of ground-truth boxes. If this is not present then + it will be computed from batched_targets["boxes"] instead, but + caching it here can improve performance for repeated calls with the + same targets. + - out_is_valid: If not None, it should be a boolean tensor of shape + (batch_size, num_queries) indicating which predictions are valid. + Invalid predictions are ignored during matching and won't appear in + the output indices. 
+ - target_is_valid_padded: If not None, it should be a boolean tensor of + shape (batch_size, max_num_gt_boxes) in padded format indicating + which GT boxes are valid. Invalid GT boxes are ignored during matching + and won't appear in the output indices. + Returns: + A list of size batch_size, containing tuples of (idx_i, idx_j): + - idx_i is the indices of the selected predictions (in order) + - idx_j is the indices of the corresponding selected targets + (in order) + For each batch element, it holds: + len(index_i) = len(index_j) + = min(num_queries, num_target_boxes) + """ + assert repeats <= 1 and repeat_batch <= 1 + bs, num_queries = outputs["pred_logits"].shape[:2] + + out_prob = self.norm(outputs["pred_logits"]).squeeze(-1) # (B, Q) + out_bbox = outputs["pred_boxes"] # (B, Q, 4)) + + num_boxes = batched_targets["num_boxes"] + + # Get a padded version of target boxes (as precomputed in the collator). + tgt_bbox = batched_targets["boxes_padded"] + assert len(tgt_bbox) == bs + num_targets = tgt_bbox.shape[1] + if num_targets == 0: + return ( + torch.empty(0, dtype=torch.long, device=out_prob.device), + torch.empty(0, dtype=torch.long, device=out_prob.device), + torch.empty(0, dtype=torch.long, device=out_prob.device), + ) + + iou, _ = box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox)) + + assert iou.shape == (bs, num_queries, num_targets) + + # Final cost matrix (higher is better in `C`; this is unlike the case + # of BinaryHungarianMatcherV2 above where lower is better in its `C`) + C = self.alpha * out_prob.unsqueeze(-1) + (1 - self.alpha) * iou + if out_is_valid is not None: + C = torch.where(out_is_valid[:, :, None], C, -1e9) + if target_is_valid_padded is not None: + C = torch.where(target_is_valid_padded[:, None, :], C, -1e9) + + # Selecting topk predictions + matches = C > torch.quantile( + C, 1 - self.topk / num_queries, dim=1, keepdim=True + ) + + # Selecting predictions above threshold + matches = matches & (C > self.threshold) + if out_is_valid is not None: + matches = matches & out_is_valid[:, :, None] + if target_is_valid_padded is not None: + matches = matches & target_is_valid_padded[:, None, :] + + # Removing padding + matches = matches & ( + torch.arange(0, num_targets, device=num_boxes.device)[None] + < num_boxes[:, None] + ).unsqueeze(1) + + batch_idx, src_idx, tgt_idx = torch.nonzero(matches, as_tuple=True) + + cum_num_boxes = torch.cat( + [ + torch.zeros(1, dtype=num_boxes.dtype, device=num_boxes.device), + num_boxes.cumsum(-1)[:-1], + ] + ) + tgt_idx += cum_num_boxes[batch_idx] + + return batch_idx, src_idx, tgt_idx diff --git a/detect_tools/sam3/sam3/train/nms_helper.py b/detect_tools/sam3/sam3/train/nms_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..cd5b6dc7b3f28878494bf06ac7ec71c879253304 --- /dev/null +++ b/detect_tools/sam3/sam3/train/nms_helper.py @@ -0,0 +1,306 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved +import warnings +from typing import Dict, List + +import numpy as np + +# Check if Numba is available +HAS_NUMBA = False +try: + import numba as nb + + HAS_NUMBA = True +except ImportError: + warnings.warn( + "Numba not found. 
Using slower pure Python implementations.", UserWarning + ) + + +# -------------------- Helper Functions -------------------- +def is_zero_box(bbox: list) -> bool: + """Check if bounding box is invalid""" + if bbox is None: + return True + return all(x <= 0 for x in bbox[:4]) or len(bbox) < 4 + + +def convert_bbox_format(bbox: list) -> List[float]: + """Convert bbox from (x,y,w,h) to (x1,y1,x2,y2)""" + x, y, w, h = bbox + return [x, y, x + w, y + h] + + +# -------------------- Track-level NMS -------------------- +def process_track_level_nms(video_groups: Dict, nms_threshold: float) -> Dict: + """Apply track-level NMS to all videos""" + for video_id, tracks in video_groups.items(): + track_detections = [] + + # Process tracks + for track_idx, track in enumerate(tracks): + if not track["bboxes"]: + continue + + converted_bboxes = [] + valid_frames = [] + for bbox in track["bboxes"]: + if bbox and not is_zero_box(bbox): + converted_bboxes.append(convert_bbox_format(bbox)) + valid_frames.append(True) + else: + converted_bboxes.append([np.nan] * 4) + valid_frames.append(False) + + if any(valid_frames): + track_detections.append( + { + "track_idx": track_idx, + "bboxes": np.array(converted_bboxes, dtype=np.float32), + "score": track["score"], + } + ) + + # Apply NMS + if track_detections: + scores = np.array([d["score"] for d in track_detections], dtype=np.float32) + keep = apply_track_nms(track_detections, scores, nms_threshold) + + # Suppress non-kept tracks + for idx, track in enumerate(track_detections): + if idx not in keep: + tracks[track["track_idx"]]["bboxes"] = [None] * len(track["bboxes"]) + + return video_groups + + +# -------------------- Frame-level NMS -------------------- +def process_frame_level_nms(video_groups: Dict, nms_threshold: float) -> Dict: + """Apply frame-level NMS to all videos""" + for video_id, tracks in video_groups.items(): + if not tracks: + continue + + num_frames = len(tracks[0]["bboxes"]) + + for frame_idx in range(num_frames): + frame_detections = [] + + # Collect valid detections + for track_idx, track in enumerate(tracks): + bbox = track["bboxes"][frame_idx] + if bbox and not is_zero_box(bbox): + frame_detections.append( + { + "track_idx": track_idx, + "bbox": np.array( + convert_bbox_format(bbox), dtype=np.float32 + ), + "score": track["score"], + } + ) + + # Apply NMS + if frame_detections: + bboxes = np.stack([d["bbox"] for d in frame_detections]) + scores = np.array( + [d["score"] for d in frame_detections], dtype=np.float32 + ) + keep = apply_frame_nms(bboxes, scores, nms_threshold) + + # Suppress non-kept detections + for i, d in enumerate(frame_detections): + if i not in keep: + tracks[d["track_idx"]]["bboxes"][frame_idx] = None + + return video_groups + + +# Track-level NMS helpers ------------------------------------------------------ +def compute_track_iou_matrix( + bboxes_stacked: np.ndarray, valid_masks: np.ndarray, areas: np.ndarray +) -> np.ndarray: + """IoU matrix computation for track-level NMS with fallback to pure Python""" + num_tracks = bboxes_stacked.shape[0] + iou_matrix = np.zeros((num_tracks, num_tracks), dtype=np.float32) + if HAS_NUMBA: + iou_matrix = _compute_track_iou_matrix_numba(bboxes_stacked, valid_masks, areas) + else: + # Pure Python implementation + for i in range(num_tracks): + for j in range(i + 1, num_tracks): + valid_ij = valid_masks[i] & valid_masks[j] + if not valid_ij.any(): + continue + bboxes_i = bboxes_stacked[i, valid_ij] + bboxes_j = bboxes_stacked[j, valid_ij] + area_i = areas[i, valid_ij] + area_j = areas[j, 
valid_ij] + inter_total = 0.0 + union_total = 0.0 + for k in range(bboxes_i.shape[0]): + x1 = max(bboxes_i[k, 0], bboxes_j[k, 0]) + y1 = max(bboxes_i[k, 1], bboxes_j[k, 1]) + x2 = min(bboxes_i[k, 2], bboxes_j[k, 2]) + y2 = min(bboxes_i[k, 3], bboxes_j[k, 3]) + inter = max(0, x2 - x1) * max(0, y2 - y1) + union = area_i[k] + area_j[k] - inter + inter_total += inter + union_total += union + if union_total > 0: + iou_matrix[i, j] = inter_total / union_total + iou_matrix[j, i] = iou_matrix[i, j] + return iou_matrix + + +if HAS_NUMBA: + + @nb.jit(nopython=True, parallel=True) + def _compute_track_iou_matrix_numba(bboxes_stacked, valid_masks, areas): + """Numba-optimized IoU matrix computation for track-level NMS""" + num_tracks = bboxes_stacked.shape[0] + iou_matrix = np.zeros((num_tracks, num_tracks), dtype=np.float32) + for i in nb.prange(num_tracks): + for j in range(i + 1, num_tracks): + valid_ij = valid_masks[i] & valid_masks[j] + if not valid_ij.any(): + continue + bboxes_i = bboxes_stacked[i, valid_ij] + bboxes_j = bboxes_stacked[j, valid_ij] + area_i = areas[i, valid_ij] + area_j = areas[j, valid_ij] + inter_total = 0.0 + union_total = 0.0 + for k in range(bboxes_i.shape[0]): + x1 = max(bboxes_i[k, 0], bboxes_j[k, 0]) + y1 = max(bboxes_i[k, 1], bboxes_j[k, 1]) + x2 = min(bboxes_i[k, 2], bboxes_j[k, 2]) + y2 = min(bboxes_i[k, 3], bboxes_j[k, 3]) + inter = max(0, x2 - x1) * max(0, y2 - y1) + union = area_i[k] + area_j[k] - inter + inter_total += inter + union_total += union + if union_total > 0: + iou_matrix[i, j] = inter_total / union_total + iou_matrix[j, i] = iou_matrix[i, j] + return iou_matrix + + +def apply_track_nms( + track_detections: List[dict], scores: np.ndarray, nms_threshold: float +) -> List[int]: + """Vectorized track-level NMS implementation""" + if not track_detections: + return [] + bboxes_stacked = np.stack([d["bboxes"] for d in track_detections], axis=0) + valid_masks = ~np.isnan(bboxes_stacked).any(axis=2) + areas = (bboxes_stacked[:, :, 2] - bboxes_stacked[:, :, 0]) * ( + bboxes_stacked[:, :, 3] - bboxes_stacked[:, :, 1] + ) + areas[~valid_masks] = 0 + iou_matrix = compute_track_iou_matrix(bboxes_stacked, valid_masks, areas) + keep = [] + order = np.argsort(-scores) + suppress = np.zeros(len(track_detections), dtype=bool) + for i in range(len(order)): + if not suppress[order[i]]: + keep.append(order[i]) + suppress[order[i:]] = suppress[order[i:]] | ( + iou_matrix[order[i], order[i:]] >= nms_threshold + ) + return keep + + +# Frame-level NMS helpers ------------------------------------------------------ +def compute_frame_ious(bbox: np.ndarray, bboxes: np.ndarray) -> np.ndarray: + """IoU computation for frame-level NMS with fallback to pure Python""" + if HAS_NUMBA: + return _compute_frame_ious_numba(bbox, bboxes) + else: + # Pure Python implementation + ious = np.zeros(len(bboxes), dtype=np.float32) + for i in range(len(bboxes)): + x1 = max(bbox[0], bboxes[i, 0]) + y1 = max(bbox[1], bboxes[i, 1]) + x2 = min(bbox[2], bboxes[i, 2]) + y2 = min(bbox[3], bboxes[i, 3]) + + inter = max(0, x2 - x1) * max(0, y2 - y1) + area1 = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) + area2 = (bboxes[i, 2] - bboxes[i, 0]) * (bboxes[i, 3] - bboxes[i, 1]) + union = area1 + area2 - inter + + ious[i] = inter / union if union > 0 else 0.0 + return ious + + +if HAS_NUMBA: + + @nb.jit(nopython=True, parallel=True) + def _compute_frame_ious_numba(bbox, bboxes): + """Numba-optimized IoU computation""" + ious = np.zeros(len(bboxes), dtype=np.float32) + for i in nb.prange(len(bboxes)): + x1 = 
max(bbox[0], bboxes[i, 0]) + y1 = max(bbox[1], bboxes[i, 1]) + x2 = min(bbox[2], bboxes[i, 2]) + y2 = min(bbox[3], bboxes[i, 3]) + + inter = max(0, x2 - x1) * max(0, y2 - y1) + area1 = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) + area2 = (bboxes[i, 2] - bboxes[i, 0]) * (bboxes[i, 3] - bboxes[i, 1]) + union = area1 + area2 - inter + + ious[i] = inter / union if union > 0 else 0.0 + return ious + + +def apply_frame_nms( + bboxes: np.ndarray, scores: np.ndarray, nms_threshold: float +) -> List[int]: + """Frame-level NMS implementation with fallback to pure Python""" + if HAS_NUMBA: + return _apply_frame_nms_numba(bboxes, scores, nms_threshold) + else: + # Pure Python implementation + order = np.argsort(-scores) + keep = [] + suppress = np.zeros(len(bboxes), dtype=bool) + + for i in range(len(order)): + if not suppress[order[i]]: + keep.append(order[i]) + current_bbox = bboxes[order[i]] + + remaining_bboxes = bboxes[order[i + 1 :]] + if len(remaining_bboxes) > 0: # Check if there are any remaining boxes + ious = compute_frame_ious(current_bbox, remaining_bboxes) + suppress[order[i + 1 :]] = suppress[order[i + 1 :]] | ( + ious >= nms_threshold + ) + + return keep + + +if HAS_NUMBA: + + @nb.jit(nopython=True) + def _apply_frame_nms_numba(bboxes, scores, nms_threshold): + """Numba-optimized NMS implementation""" + order = np.argsort(-scores) + keep = [] + suppress = np.zeros(len(bboxes), dtype=nb.boolean) + + for i in range(len(order)): + if not suppress[order[i]]: + keep.append(order[i]) + current_bbox = bboxes[order[i]] + + if i + 1 < len(order): # Check bounds + ious = _compute_frame_ious_numba( + current_bbox, bboxes[order[i + 1 :]] + ) + suppress[order[i + 1 :]] = suppress[order[i + 1 :]] | ( + ious >= nms_threshold + ) + + return keep diff --git a/detect_tools/sam3/sam3/train/optim/__init__.py b/detect_tools/sam3/sam3/train/optim/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..46d37d2aaef52ec7de01999516a2b8c3e1fa4986 --- /dev/null +++ b/detect_tools/sam3/sam3/train/optim/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved diff --git a/detect_tools/sam3/sam3/train/optim/optimizer.py b/detect_tools/sam3/sam3/train/optim/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..d401b98f2fff967cdbe5bf5c9db7c78f21b30503 --- /dev/null +++ b/detect_tools/sam3/sam3/train/optim/optimizer.py @@ -0,0 +1,498 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import fnmatch +import inspect +import itertools +import logging +import types +from typing import ( + Any, + Callable, + Dict, + Iterable, + List, + Mapping, + Optional, + Set, + Tuple, + Type, + Union, +) + +import hydra + +import torch +import torch.nn as nn +from omegaconf import DictConfig +from torch import Tensor + + +class Optimizer: + def __init__(self, optimizer, schedulers=None) -> None: + self.optimizer = optimizer + self.schedulers = schedulers + self._validate_optimizer_schedulers() + self.step_schedulers(0.0, 0) + + def _validate_optimizer_schedulers(self): + if self.schedulers is None: + return + for _, set_of_schedulers in enumerate(self.schedulers): + for option, _ in set_of_schedulers.items(): + assert option in self.optimizer.defaults, ( + "Optimizer option " + f"{option} not found in {self.optimizer}. 
Valid options are " + f"{self.optimizer.defaults.keys()}" + ) + + def step_schedulers(self, where: float, step: int) -> None: + if self.schedulers is None: + return + for i, param_group in enumerate(self.optimizer.param_groups): + for option, scheduler in self.schedulers[i].items(): + if "step" in inspect.signature(scheduler.__call__).parameters: + new_value = scheduler(step=step, where=where) + elif ( + hasattr(scheduler, "scheduler") + and "step" + in inspect.signature(scheduler.scheduler.__call__).parameters + ): + # To handle ValueScaler wrappers + new_value = scheduler(step=step, where=where) + else: + new_value = scheduler(where) + param_group[option] = new_value + + def step(self, where, step, closure=None): + self.step_schedulers(where, step) + return self.optimizer.step(closure) + + def zero_grad(self, *args, **kwargs): + return self.optimizer.zero_grad(*args, **kwargs) + + +def set_default_parameters( + scheduler_cfgs: List[DictConfig], all_parameter_names: Set[str] +) -> None: + """Set up the "default" scheduler with the right parameters. + + Args: + scheduler_cgfs: A list of scheduler configs, where each scheduler also + specifies which parameters it applies to, based on the names of parameters + or the class of the modules. At most one scheduler is allowed to skip this + specification, which is used as a "default" specification for any remaining + parameters. + all_parameter_names: Names of all the parameters to consider. + """ + constraints = [ + scheduler_cfg.parameter_names + for scheduler_cfg in scheduler_cfgs + if scheduler_cfg.parameter_names is not None + ] + if len(constraints) == 0: + default_params = set(all_parameter_names) + else: + default_params = all_parameter_names - set.union(*constraints) + default_count = 0 + for scheduler_cfg in scheduler_cfgs: + if scheduler_cfg.parameter_names is None: + scheduler_cfg.parameter_names = default_params + default_count += 1 + assert default_count <= 1, "Only one scheduler per option can be default" + if default_count == 0: + # No default scheduler specified, add a default, but without any scheduler + # for that option + scheduler_cfgs.append({"parameter_names": default_params}) + + +def name_constraints_to_parameters( + param_constraints: List[Set[str]], named_parameters: Dict[str, Tensor] +) -> List[torch.nn.Parameter]: + """Return parameters which match the intersection of parameter constraints. + + Note that this returns the parameters themselves, not their names. + + Args: + param_constraints: A list, with each element being a set of allowed parameters. + named_parameters: Mapping from a parameter name to the parameter itself. + + Returns: + A list containing the parameters which overlap with _each_ constraint set from + param_constraints. + """ + matching_names = set.intersection(*param_constraints) + return [value for name, value in named_parameters.items() if name in matching_names] + + +def map_scheduler_cfgs_to_param_groups( + all_scheduler_cfgs: Iterable[List[Dict]], + named_parameters: Dict[str, Tensor], +) -> Tuple[List[Dict[Any, Any]], List[Dict[str, List[torch.nn.Parameter]]]]: + """Produce parameter groups corresponding to all the scheduler configs. + + Takes all the scheduler configs, each of which applies to a specific optimizer + option (like "lr" or "weight_decay") and has a set of parameter names which it + applies to, and produces a final set of param groups where each param group + covers all the options which apply to a particular set of parameters. 
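+
+    Illustrative example (hypothetical parameter names): if the "lr" option has one
+    config matching {"backbone.weight"} and another matching {"head.weight"}, while the
+    "weight_decay" option has a single config matching both names, the product yields
+    two param groups -- {"backbone.weight"} and {"head.weight"} -- each with its own
+    "lr" scheduler and the shared "weight_decay" scheduler.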
+ + Args: + all_scheduler_cfgs: All the scheduler configs covering every option. + named_parameters: Mapping from a parameter name to the parameter itself. + Returns: + Tuple of lists of schedulers and param_groups, where schedulers[i] + applies to param_groups[i]. + """ + + scheduler_cfgs_per_param_group = itertools.product(*all_scheduler_cfgs) + schedulers = [] + param_groups = [] + for scheduler_cfgs in scheduler_cfgs_per_param_group: + param_constraints = [ + scheduler_cfg["parameter_names"] for scheduler_cfg in scheduler_cfgs + ] + matching_parameters = name_constraints_to_parameters( + param_constraints, named_parameters + ) + if len(matching_parameters) == 0: # If no overlap of parameters, skip + continue + schedulers_for_group = { + scheduler_cfg["option"]: scheduler_cfg["scheduler"] + for scheduler_cfg in scheduler_cfgs + if "option" in scheduler_cfg + } + schedulers.append(schedulers_for_group) + param_groups.append({"params": matching_parameters}) + return schedulers, param_groups + + +def validate_param_group_params(param_groups: List[Dict], model: nn.Module): + """Check that the param groups are non-overlapping and cover all the parameters. + + Args: + param_groups: List of all param groups + model: Model to validate against. The check ensures that all the model + parameters are part of param_groups + """ + for pg in param_groups: + # no param should be repeated within a group + assert len(pg["params"]) == len(set(pg["params"])) + parameters = [set(param_group["params"]) for param_group in param_groups] + model_parameters = {parameter for _, parameter in model.named_parameters()} + for p1, p2 in itertools.permutations(parameters, 2): + assert p1.isdisjoint(p2), "Scheduler generated param_groups should be disjoint" + assert set.union(*parameters) == model_parameters, ( + "Scheduler generated param_groups must include all parameters of the model." + f" Found {len(set.union(*parameters))} params whereas model has" + f" {len(model_parameters)} params" + ) + + +def unix_module_cls_pattern_to_parameter_names( + filter_module_cls_names: List[str], + module_cls_to_param_names: Dict[Type, str], +) -> Union[None, Set[str]]: + """Returns param names which pass the filters specified in filter_module_cls_names. + + Args: + filter_module_cls_names: A list of filter strings containing class names, like + ["torch.nn.LayerNorm", "torch.nn.BatchNorm2d"] + module_cls_to_param_names: Mapping from module classes to the parameter names + they contain. See `get_module_cls_to_param_names`. + """ + if filter_module_cls_names is None: + return set() + allowed_parameter_names = [] + for module_cls_name in filter_module_cls_names: + module_cls = hydra.utils.get_class(module_cls_name) + if module_cls not in module_cls_to_param_names: + raise AssertionError( + f"module_cls_name {module_cls_name} does not " + "match any classes in the model" + ) + matching_parameters = module_cls_to_param_names[module_cls] + assert ( + len(matching_parameters) > 0 + ), f"module_cls_name {module_cls_name} does not contain any parameters in the model" + logging.info( + f"Matches for module_cls_name [{module_cls_name}]: {matching_parameters} " + ) + allowed_parameter_names.append(matching_parameters) + return set.union(*allowed_parameter_names) + + +def unix_param_pattern_to_parameter_names( + filter_param_names: Optional[List[str]], + parameter_names: Dict[str, torch.Tensor], +) -> Union[None, Set[str]]: + """Returns param names which pass the filters specified in filter_param_names. 
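+
+    For example (hypothetical names): with parameters "block.1.linear.weight" and
+    "block.2.linear.weight", the filter ["block.2.*"] selects only the latter, since
+    patterns are resolved with fnmatch-style wildcards.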
+ + Args: + filter_param_names: A list of unix-style filter strings with optional + wildcards, like ["block.2.*", "block.2.linear.weight"] + module_cls_to_param_names: Mapping from module classes to the parameter names + they contain. See `get_module_cls_to_param_names`. + """ + + if filter_param_names is None: + return set() + allowed_parameter_names = [] + for param_name in filter_param_names: + matching_parameters = set(fnmatch.filter(parameter_names, param_name)) + assert ( + len(matching_parameters) >= 1 + ), f"param_name {param_name} does not match any parameters in the model" + logging.info(f"Matches for param_name [{param_name}]: {matching_parameters}") + allowed_parameter_names.append(matching_parameters) + return set.union(*allowed_parameter_names) + + +def _unix_pattern_to_parameter_names( + scheduler_cfg: DictConfig, + parameter_names: Set[str], + module_cls_to_param_names: Dict[Type, str], +) -> Union[None, Set[str]]: + """Returns param names which pass the filters specified in scheduler_cfg. + + Args: + scheduler_cfg: The config for the scheduler + parameter_names: The set of all parameter names which will be filtered + """ + if "param_names" not in scheduler_cfg and "module_cls_names" not in scheduler_cfg: + return None + return unix_param_pattern_to_parameter_names( + scheduler_cfg.get("param_names"), parameter_names + ).union( + unix_module_cls_pattern_to_parameter_names( + scheduler_cfg.get("module_cls_names"), module_cls_to_param_names + ) + ) + + +def get_module_cls_to_param_names( + model: nn.Module, param_allowlist: Set[str] = None +) -> Dict[Type, str]: + """Produce a mapping from all the modules classes to the names of parames they own. + + Only counts a parameter as part of the immediate parent module, i.e. recursive + parents do not count. + + Args: + model: Model to iterate over + param_allowlist: If specified, only these param names will be processed + """ + + module_cls_to_params = {} + for module_name, module in model.named_modules(): + module_cls = type(module) + module_cls_to_params.setdefault(module_cls, set()) + for param_name, _ in module.named_parameters(recurse=False): + full_param_name = get_full_parameter_name(module_name, param_name) + if param_allowlist is None or full_param_name in param_allowlist: + module_cls_to_params[module_cls].add(full_param_name) + return module_cls_to_params + + +def construct_optimizer( + model: torch.nn.Module, + optimizer_conf: Any, + options_conf: Mapping[str, List] = None, + param_group_modifiers_conf: List[Callable] = None, + param_allowlist: Optional[Set[str]] = None, + validate_param_groups=True, +) -> Optimizer: + """ + Constructs a stochastic gradient descent or ADAM (or ADAMw) optimizer + with momentum. i.e, constructs a torch.optim.Optimizer with zero-weight decay + Batchnorm and/or no-update 1-D parameters support, based on the config. + + Supports wrapping the optimizer with Layer-wise Adaptive Rate Scaling + (LARS): https://arxiv.org/abs/1708.03888 + + Args: + model: model to perform stochastic gradient descent + optimization or ADAM optimization. + optimizer_conf: Hydra config consisting a partial torch optimizer like SGD or + ADAM, still missing the params argument which this function provides to + produce the final optimizer + param_group_modifiers_conf: Optional user specified functions which can modify + the final scheduler configs before the optimizer's param groups are built + param_allowlist: The parameters to optimize. Parameters which are not part of + this allowlist will be skipped. 
+ validate_param_groups: If enabled, valides that the produced param_groups don't + overlap and cover all the model parameters. + """ + if param_allowlist is None: + param_allowlist = {name for name, _ in model.named_parameters()} + + named_parameters = { + name: param + for name, param in model.named_parameters() + if name in param_allowlist + } + + if not options_conf: + optimizer = hydra.utils.instantiate(optimizer_conf, named_parameters.values()) + return Optimizer(optimizer) + + all_parameter_names = { + name for name, _ in model.named_parameters() if name in param_allowlist + } + module_cls_to_all_param_names = get_module_cls_to_param_names( + model, param_allowlist + ) + + scheduler_cfgs_per_option = hydra.utils.instantiate(options_conf) + all_scheduler_cfgs = [] + for option, scheduler_cfgs in scheduler_cfgs_per_option.items(): + for config in scheduler_cfgs: + config.option = option + config.parameter_names = _unix_pattern_to_parameter_names( + config, all_parameter_names, module_cls_to_all_param_names + ) + set_default_parameters(scheduler_cfgs, all_parameter_names) + all_scheduler_cfgs.append(scheduler_cfgs) + + if param_group_modifiers_conf: + for custom_param_modifier in param_group_modifiers_conf: + custom_param_modifier = hydra.utils.instantiate(custom_param_modifier) + all_scheduler_cfgs = custom_param_modifier( + scheduler_cfgs=all_scheduler_cfgs, model=model + ) + schedulers, param_groups = map_scheduler_cfgs_to_param_groups( + all_scheduler_cfgs, named_parameters + ) + if validate_param_groups: + validate_param_group_params(param_groups, model) + optimizer = hydra.utils.instantiate(optimizer_conf, param_groups) + return Optimizer(optimizer, schedulers) + + +def get_full_parameter_name(module_name, param_name): + if module_name == "": + return param_name + return f"{module_name}.{param_name}" + + +class GradientClipper: + """ + Gradient clipping utils that works for DDP + """ + + def __init__(self, max_norm: float = 1.0, norm_type: int = 2): + assert isinstance(max_norm, (int, float)) or max_norm is None + self.max_norm = max_norm if max_norm is None else float(max_norm) + self.norm_type = norm_type + + def __call__(self, model: nn.Module): + if self.max_norm is None: + return # no-op + + nn.utils.clip_grad_norm_( + model.parameters(), max_norm=self.max_norm, norm_type=self.norm_type + ) + + +class ValueScaler: + def __init__(self, scheduler, mult_val: float): + self.scheduler = scheduler + self.mult_val = mult_val + + def __call__(self, *args, **kwargs): + val = self.scheduler(*args, **kwargs) + return val * self.mult_val + + +def rgetattr(obj, rattrs: str = None): + """ + Like getattr(), but supports dotted notation for nested objects. + rattrs is a str of form 'attr1.attr2', returns obj.attr1.attr2 + """ + if rattrs is None: + return obj + attrs = rattrs.split(".") + for attr in attrs: + obj = getattr(obj, attr) + return obj + + +def layer_decay_param_modifier( + scheduler_cfgs: List[List[Dict]], + model, + layer_decay_value: float, + layer_decay_min: Optional[float] = None, + apply_to: Optional[str] = None, + overrides: List[Dict] = (), +) -> List[List[Dict]]: + """ + Args + - scheduler_cfgs: a list of omegaconf.ListConfigs. + Each element in the list is a omegaconfg.DictConfig with the following structure + { + "scheduler": + "option": possible options are "lr", "weight_decay" etc. 
+ "parameter_names": Set of str indicating param names that this scheduler applies to + } + - model: a model that implements a method `get_layer_id` that maps layer_name to an integer and + and a method get_num_layers. + Alternatively, use apply_to argument to select a specific component of the model. + - layer_decay_value: float + - layer_decay_min: min val for layer decay + - apply_to: optional arg to select which component of the model to apply the the layer decay modifier to + - overrides: to manually override lr for specific patterns. Is a list of dicts. Each dict, has keys "pattern", "value". + Returns + - scheduler_configs: same structure as the input, elements can be modified + """ + model = rgetattr(model, apply_to) + num_layers = model.get_num_layers() + 1 + layer_decays = [ + layer_decay_value ** (num_layers - i) for i in range(num_layers + 1) + ] + if layer_decay_min is not None: + layer_decays = [max(val, layer_decay_min) for val in layer_decays] + final_scheduler_cfgs = [] + # scheduler_cfgs is a list of lists + for scheduler_cfg_group in scheduler_cfgs: + curr_cfg_group = [] + # scheduler_cfg_group is a list of dictionaries + for scheduler_cfg in scheduler_cfg_group: + if scheduler_cfg["option"] != "lr": + curr_cfg_group.append(scheduler_cfg) + continue + # Need sorted so that the list of parameter names is deterministic and consistent + # across re-runs of this job. Else it was causing issues with loading the optimizer + # state during a job restart + parameter_names = sorted(scheduler_cfg["parameter_names"]) + + # Only want one cfg group per layer + layer_cfg_groups = {} + for param_name in parameter_names: + layer_id = num_layers + this_scale = layer_decays[layer_id] + if param_name.startswith(apply_to): + layer_id = model.get_layer_id(param_name) + this_scale = layer_decays[layer_id] + # Overrides + for override in overrides: + if fnmatch.fnmatchcase(param_name, override["pattern"]): + this_scale = float(override["value"]) + layer_id = override["pattern"] + break + + if layer_id not in layer_cfg_groups: + curr_param = { + "option": scheduler_cfg["option"], + "scheduler": ValueScaler( + scheduler_cfg["scheduler"], this_scale + ), + "parameter_names": {param_name}, + } + else: + curr_param = layer_cfg_groups[layer_id] + curr_param["parameter_names"].add(param_name) + layer_cfg_groups[layer_id] = curr_param + + for layer_cfg in layer_cfg_groups.values(): + curr_cfg_group.append(layer_cfg) + + final_scheduler_cfgs.append(curr_cfg_group) + return final_scheduler_cfgs diff --git a/detect_tools/sam3/sam3/train/optim/schedulers.py b/detect_tools/sam3/sam3/train/optim/schedulers.py new file mode 100644 index 0000000000000000000000000000000000000000..59da840f1d0a854e816710273f341c471490b788 --- /dev/null +++ b/detect_tools/sam3/sam3/train/optim/schedulers.py @@ -0,0 +1,41 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
All Rights Reserved + +import math + + +class InverseSquareRootParamScheduler: + def __init__( + self, + base_lr: float, + warmup_steps: int, + cooldown_steps: int, + timescale: int, + ): + self.base_lr = base_lr + self.warmup_steps = warmup_steps + self.cooldown_steps = cooldown_steps + self.timescale = timescale + + def __call__(self, step: int, where: float): + lr = self.base_lr + + if where > 0: + total_steps = step / where + progress = (step - self.warmup_steps) / float( + total_steps - self.warmup_steps + ) + progress = max(min(progress, 1), 0) + else: + progress = 0 + total_steps = 1 + + shift = self.timescale - self.warmup_steps + if self.warmup_steps < step: + lr = lr / math.sqrt((step + shift) / self.timescale) + + if self.warmup_steps: + lr = lr * min(1.0, step / self.warmup_steps) + if self.cooldown_steps: + lr = lr * min(1.0, (total_steps - step) / self.cooldown_steps) + + return lr diff --git a/detect_tools/sam3/sam3/train/train.py b/detect_tools/sam3/sam3/train/train.py new file mode 100644 index 0000000000000000000000000000000000000000..b3e995c5e615f1c9bd8c6c8a1f93611166443e33 --- /dev/null +++ b/detect_tools/sam3/sam3/train/train.py @@ -0,0 +1,339 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import logging +import os +import random +import sys +import traceback +from argparse import ArgumentParser +from copy import deepcopy + +import submitit +import torch + +from hydra import compose, initialize_config_module +from hydra.utils import instantiate + +from iopath.common.file_io import g_pathmgr +from omegaconf import OmegaConf + +from sam3.train.utils.train_utils import makedir, register_omegaconf_resolvers +from tqdm import tqdm + + +os.environ["HYDRA_FULL_ERROR"] = "1" + + +class SlurmEvent: + QUEUED = "QUEUED" + START = "START" + FINISH = "FINISH" + JOB_ERROR = "JOB_ERROR" + SLURM_SIGNAL = "SLURM_SIGNAL" + + +def handle_custom_resolving(cfg): + # We'll resolve the config here, so we can catch mistakes early. + # However, we need to pass the un-resolved config to the launcher + # (because DVC resolving needs to be done on the node it will run on) + # First, do a copy without triggering resolving + cfg_resolved = OmegaConf.to_container(cfg, resolve=False) + cfg_resolved = OmegaConf.create(cfg_resolved) + return cfg_resolved + + +def single_proc_run(local_rank, main_port, cfg, world_size): + """Single GPU process""" + os.environ["MASTER_ADDR"] = "localhost" + os.environ["MASTER_PORT"] = str(main_port) + os.environ["RANK"] = str(local_rank) + os.environ["LOCAL_RANK"] = str(local_rank) + os.environ["WORLD_SIZE"] = str(world_size) + try: + register_omegaconf_resolvers() + except Exception as e: + logging.info(e) + + trainer = instantiate(cfg.trainer, _recursive_=False) + trainer.run() + + +def single_node_runner(cfg, main_port: int): + assert cfg.launcher.num_nodes == 1 + # assert cfg.launcher.gpus_per_node == 1 + num_proc = cfg.launcher.gpus_per_node + torch.multiprocessing.set_start_method( + "spawn" + ) # CUDA runtime does not support `fork` + if num_proc == 1: + # directly call single_proc so we can easily set breakpoints + # mp.spawn does not let us set breakpoints + single_proc_run(local_rank=0, main_port=main_port, cfg=cfg, world_size=num_proc) + else: + mp_runner = torch.multiprocessing.start_processes + args = (main_port, cfg, num_proc) + # Note: using "fork" below, "spawn" causes time and error regressions. 
Using + # spawn changes the default multiprocessing context to spawn, which doesn't + # interact well with the dataloaders (likely due to the use of OpenCV). + mp_runner(single_proc_run, args=args, nprocs=num_proc, start_method="spawn") + + +def format_exception(e: Exception, limit=20): + traceback_str = "".join(traceback.format_tb(e.__traceback__, limit=limit)) + return f"{type(e).__name__}: {e}\nTraceback:\n{traceback_str}" + + +class SubmititRunner(submitit.helpers.Checkpointable): + """A callable which is passed to submitit to launch the jobs.""" + + def __init__(self, port, cfg): + self.cfg = cfg + self.port = port + self.has_setup = False + + def run_trainer(self): + job_env = submitit.JobEnvironment() + # Need to add this again so the hydra.job.set_env PYTHONPATH + # is also set when launching jobs. + add_pythonpath_to_sys_path() + os.environ["MASTER_ADDR"] = job_env.hostnames[0] + os.environ["MASTER_PORT"] = str(self.port) + os.environ["RANK"] = str(job_env.global_rank) + os.environ["LOCAL_RANK"] = str(job_env.local_rank) + os.environ["WORLD_SIZE"] = str(job_env.num_tasks) + + register_omegaconf_resolvers() + cfg_resolved = OmegaConf.to_container(self.cfg, resolve=False) + cfg_resolved = OmegaConf.create(cfg_resolved) + + trainer = instantiate(cfg_resolved.trainer, _recursive_=False) + trainer.run() + + def __call__(self): + job_env = submitit.JobEnvironment() + self.setup_job_info(job_env.job_id, job_env.global_rank) + try: + self.run_trainer() + except Exception as e: + # Log the exception. Then raise it again (as what SubmititRunner currently does). + message = format_exception(e) + logging.error(message) + raise e + + def setup_job_info(self, job_id, rank): + """Set up slurm job info""" + self.job_info = { + "job_id": job_id, + "rank": rank, + "cluster": self.cfg.get("cluster", None), + "experiment_log_dir": self.cfg.launcher.experiment_log_dir, + } + + self.has_setup = True + + +def add_pythonpath_to_sys_path(): + if "PYTHONPATH" not in os.environ or not os.environ["PYTHONPATH"]: + return + sys.path = os.environ["PYTHONPATH"].split(":") + sys.path + + +def main(args) -> None: + cfg = compose(config_name=args.config) + if cfg.launcher.experiment_log_dir is None: + cfg.launcher.experiment_log_dir = os.path.join( + os.getcwd(), "sam3_logs", args.config + ) + print("###################### Train App Config ####################") + print(OmegaConf.to_yaml(cfg)) + print("############################################################") + + add_pythonpath_to_sys_path() + makedir(cfg.launcher.experiment_log_dir) + with g_pathmgr.open( + os.path.join(cfg.launcher.experiment_log_dir, "config.yaml"), "w" + ) as f: + f.write(OmegaConf.to_yaml(cfg)) + + cfg_resolved = OmegaConf.to_container(cfg, resolve=False) + cfg_resolved = OmegaConf.create(cfg_resolved) + + with g_pathmgr.open( + os.path.join(cfg.launcher.experiment_log_dir, "config_resolved.yaml"), "w" + ) as f: + f.write(OmegaConf.to_yaml(cfg_resolved, resolve=True)) + + submitit_conf = cfg.get("submitit", None) + assert submitit_conf is not None, "Missing submitit config" + + experiment_log_dir = cfg.launcher.experiment_log_dir + print(f"Experiment Log Dir:\n{experiment_log_dir}") + submitit_dir = os.path.join(experiment_log_dir, "submitit_logs") + + # Prioritize cmd line args + cfg.launcher.gpus_per_node = ( + args.num_gpus if args.num_gpus is not None else cfg.launcher.gpus_per_node + ) + cfg.launcher.num_nodes = ( + args.num_nodes if args.num_nodes is not None else cfg.launcher.num_nodes + ) + submitit_conf.use_cluster = ( + 
args.use_cluster if args.use_cluster is not None else submitit_conf.use_cluster + ) + if submitit_conf.use_cluster: + executor = submitit.AutoExecutor(folder=submitit_dir) + submitit_conf.partition = ( + args.partition + if args.partition is not None + else submitit_conf.get("partition", None) + ) + submitit_conf.account = ( + args.account + if args.account is not None + else submitit_conf.get("account", None) + ) + submitit_conf.qos = ( + args.qos if args.qos is not None else submitit_conf.get("qos", None) + ) + job_kwargs = { + "timeout_min": 60 * submitit_conf.timeout_hour, + "name": ( + submitit_conf.name if hasattr(submitit_conf, "name") else args.config + ), + "slurm_partition": submitit_conf.partition, + "gpus_per_node": cfg.launcher.gpus_per_node, + "tasks_per_node": cfg.launcher.gpus_per_node, # one task per GPU + "cpus_per_task": submitit_conf.cpus_per_task, + "nodes": cfg.launcher.num_nodes, + "slurm_additional_parameters": { + "exclude": " ".join(submitit_conf.get("exclude_nodes", [])), + }, + } + if "include_nodes" in submitit_conf: + assert ( + len(submitit_conf["include_nodes"]) >= cfg.launcher.num_nodes + ), "Not enough nodes" + job_kwargs["slurm_additional_parameters"]["nodelist"] = " ".join( + submitit_conf["include_nodes"] + ) + if submitit_conf.account is not None: + job_kwargs["slurm_additional_parameters"]["account"] = submitit_conf.account + if submitit_conf.qos is not None: + job_kwargs["slurm_additional_parameters"]["qos"] = submitit_conf.qos + + if submitit_conf.get("mem_gb", None) is not None: + job_kwargs["mem_gb"] = submitit_conf.mem_gb + elif submitit_conf.get("mem", None) is not None: + job_kwargs["slurm_mem"] = submitit_conf.mem + + if submitit_conf.get("constraints", None) is not None: + job_kwargs["slurm_constraint"] = submitit_conf.constraints + + if submitit_conf.get("comment", None) is not None: + job_kwargs["slurm_comment"] = submitit_conf.comment + + # Supports only cpu-bind option within srun_args. 
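For orientation, a hedged sketch of the `launcher`/`submitit` config keys that `main()` above reads; the real schema lives in the Hydra YAML configs, so only the keys accessed in this file are listed and the values are illustrative:

from omegaconf import OmegaConf

cfg_sketch = OmegaConf.create({
    "launcher": {
        "num_nodes": 1,
        "gpus_per_node": 8,
        "experiment_log_dir": None,        # defaults to ./sam3_logs/<config name> when unset
    },
    "submitit": {
        "use_cluster": True,               # False falls back to single_node_runner()
        "partition": "gpu",
        "account": None,
        "qos": None,
        "timeout_hour": 24,
        "cpus_per_task": 10,
        "port_range": [10000, 20000],      # the master port is sampled from this range
        "srun_args": {"cpu_bind": "none"}, # only cpu-bind is forwarded to srun
    },
})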
New options can be added here + if submitit_conf.get("srun_args", None) is not None: + job_kwargs["slurm_srun_args"] = [] + if submitit_conf.srun_args.get("cpu_bind", None) is not None: + job_kwargs["slurm_srun_args"].extend( + ["--cpu-bind", submitit_conf.srun_args.cpu_bind] + ) + + print("###################### SLURM Config ####################") + print(job_kwargs) + print("##########################################") + executor.update_parameters(**job_kwargs) + + if ( + "job_array" in submitit_conf + and submitit_conf.job_array.get("num_tasks", -1) > 0 + ): + num_tasks = submitit_conf.job_array.num_tasks + job_array_config_dir = os.path.join( + cfg.launcher.experiment_log_dir, "job_array_configs" + ) + makedir(job_array_config_dir) + + job_indices = range(num_tasks) + ports = random.sample( + range(submitit_conf.port_range[0], submitit_conf.port_range[1] + 1), + k=len(job_indices), + ) + + jobs_runners_configs = [] + with executor.batch(): + task_index = 0 + for indices, main_port in tqdm(zip(job_indices, ports)): + curr_cfg = deepcopy(cfg) + curr_cfg.submitit.job_array["task_index"] = task_index + curr_cfg_resolved = handle_custom_resolving(cfg) + runner = SubmititRunner(main_port, curr_cfg) + job = executor.submit(runner) + jobs_runners_configs.append( + (job, runner, curr_cfg, curr_cfg_resolved) + ) + task_index += 1 + + for job, runner, job_cfg, job_cfg_resolved in jobs_runners_configs: + print("Submitit Job ID:", job.job_id) + + # Save job specific config + job_array_config_file = os.path.join( + job_array_config_dir, "{}.config.yaml".format(job.job_id) + ) + with g_pathmgr.open(job_array_config_file, "w") as f: + f.write(OmegaConf.to_yaml(job_cfg)) + + job_array_config_resolved_file = os.path.join( + job_array_config_dir, "{}.config_resolved.yaml".format(job.job_id) + ) + with g_pathmgr.open(job_array_config_resolved_file, "w") as f: + f.write(OmegaConf.to_yaml(job_cfg_resolved, resolve=True)) + + runner.setup_job_info(job.job_id, rank=0) + # runner.log_event(event_type=SlurmEvent.QUEUED) + else: + main_port = random.randint( + submitit_conf.port_range[0], submitit_conf.port_range[1] + ) + runner = SubmititRunner(main_port, cfg) + job = executor.submit(runner) + print(f"Submitit Job ID: {job.job_id}") + runner.setup_job_info(job.job_id, rank=0) + + else: + cfg.launcher.num_nodes = 1 + main_port = random.randint( + submitit_conf.port_range[0], submitit_conf.port_range[1] + ) + single_node_runner(cfg, main_port) + + +if __name__ == "__main__": + initialize_config_module("sam3.train", version_base="1.2") + parser = ArgumentParser() + parser.add_argument( + "-c", + "--config", + required=True, + type=str, + help="path to config file (e.g. 
configs/roboflow_v100_full_ft_100_images.yaml)", + ) + parser.add_argument( + "--use-cluster", + type=int, + default=None, + help="whether to launch on a cluster, 0: run locally, 1: run on a cluster", + ) + parser.add_argument("--partition", type=str, default=None, help="SLURM partition") + parser.add_argument("--account", type=str, default=None, help="SLURM account") + parser.add_argument("--qos", type=str, default=None, help="SLURM qos") + parser.add_argument( + "--num-gpus", type=int, default=None, help="number of GPUS per node" + ) + parser.add_argument("--num-nodes", type=int, default=None, help="Number of nodes") + args = parser.parse_args() + args.use_cluster = bool(args.use_cluster) if args.use_cluster is not None else None + register_omegaconf_resolvers() + main(args) diff --git a/detect_tools/sam3/sam3/train/trainer.py b/detect_tools/sam3/sam3/train/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..ac7c1b52321ec08c69ba90b3c8bee6d428669013 --- /dev/null +++ b/detect_tools/sam3/sam3/train/trainer.py @@ -0,0 +1,1193 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import contextlib +import fnmatch +import gc +import json +import logging +import math +import os +import time +from collections import OrderedDict +from dataclasses import dataclass, field +from typing import Any, Dict, List, Mapping, Optional + +import numpy as np + +import torch +import torch.distributed as dist +import torch.nn as nn +from hydra.utils import instantiate +from iopath.common.file_io import g_pathmgr + +from sam3.model.data_misc import BatchedDatapoint +from sam3.model.model_misc import SAM3Output +from sam3.model.utils.misc import copy_data_to_device + +from sam3.train.optim.optimizer import construct_optimizer + +from sam3.train.utils.checkpoint_utils import ( + assert_skipped_parameters_are_frozen, + exclude_params_matching_unix_pattern, + load_state_dict_into_model, + with_check_parameter_frozen, +) + +from sam3.train.utils.distributed import all_reduce_max, barrier, get_rank + +from sam3.train.utils.logger import Logger, setup_logging +from sam3.train.utils.train_utils import ( + AverageMeter, + collect_dict_keys, + DurationMeter, + get_amp_type, + get_machine_local_and_dist_rank, + get_resume_checkpoint, + human_readable_time, + is_dist_avail_and_initialized, + log_env_variables, + makedir, + MemMeter, + Phase, + ProgressMeter, + set_seeds, + setup_distributed_backend, +) + + +CORE_LOSS_KEY = "core_loss" + + +def unwrap_ddp_if_wrapped(model): + if isinstance(model, torch.nn.parallel.DistributedDataParallel): + return model.module + return model + + +@dataclass +class OptimAMPConf: + enabled: bool = False + amp_dtype: str = "float16" + + +@dataclass +class OptimConf: + optimizer: torch.optim.Optimizer = None + options: Optional[Dict[str, Any]] = None + param_group_modifiers: Optional[List] = None + amp: Optional[Dict[str, Any]] = None + gradient_clip: Any = None + gradient_logger: Any = None + + def __post_init__(self): + # amp + if not isinstance(self.amp, OptimAMPConf): + if self.amp is None: + self.amp = {} + assert isinstance(self.amp, Mapping) + self.amp = OptimAMPConf(**self.amp) + + +@dataclass +class DistributedConf: + backend: Optional[str] = None # inferred from accelerator type + comms_dtype: Optional[str] = None + find_unused_parameters: bool = False + timeout_mins: int = 30 + gradient_as_bucket_view: bool = False # PyTorch DDP default is False + static_graph: bool = False # PyTorch DDP default is False + + +@dataclass +class 
CudaConf: + cudnn_deterministic: bool = False + cudnn_benchmark: bool = True + allow_tf32: bool = False + # if not None, `matmul_allow_tf32` key will override `allow_tf32` for matmul + matmul_allow_tf32: Optional[bool] = None + # if not None, `cudnn_allow_tf32` key will override `allow_tf32` for cudnn + cudnn_allow_tf32: Optional[bool] = None + + +@dataclass +class CheckpointConf: + save_dir: str + save_freq: int + save_list: List[int] = field(default_factory=list) + model_weight_initializer: Any = None + save_best_meters: List[str] = None + skip_saving_parameters: List[str] = field(default_factory=list) + initialize_after_preemption: Optional[bool] = None + # if not None, training will be resumed from this checkpoint + resume_from: Optional[str] = None + + def infer_missing(self): + if self.initialize_after_preemption is None: + with_skip_saving = len(self.skip_saving_parameters) > 0 + self.initialize_after_preemption = with_skip_saving + return self + + +@dataclass +class LoggingConf: + log_dir: str + log_freq: int # In iterations + tensorboard_writer: Any + log_level_primary: str = "INFO" + log_level_secondary: str = "ERROR" + log_scalar_frequency: int = 100 + log_visual_frequency: int = 100 + scalar_keys_to_log: Optional[Dict[str, Any]] = None + log_batch_stats: bool = False + wandb_writer: Optional[Any] = None + + +class Trainer: + """ + Trainer supporting the DDP training strategies. + """ + + EPSILON = 1e-8 + + def __init__( + self, + *, # the order of these args can change at any time, so they are keyword-only + data: Dict[str, Any], + model: Dict[str, Any], + logging: Dict[str, Any], + checkpoint: Dict[str, Any], + max_epochs: int, + mode: str = "train", + accelerator: str = "cuda", + seed_value: int = 123, + val_epoch_freq: int = 1, + distributed: Dict[str, bool] = None, + cuda: Dict[str, bool] = None, + env_variables: Optional[Dict[str, Any]] = None, + optim: Optional[Dict[str, Any]] = None, + optim_overrides: Optional[List[Dict[str, Any]]] = None, + meters: Optional[Dict[str, Any]] = None, + loss: Optional[Dict[str, Any]] = None, + skip_first_val: bool = False, + skip_saving_ckpts: bool = False, + empty_gpu_mem_cache_after_eval: bool = True, + gradient_accumulation_steps: int = 1, + ): + self._setup_env_variables(env_variables) + self._setup_timers() + + self.data_conf = data + self.model_conf = model + self.logging_conf = LoggingConf(**logging) + self.checkpoint_conf = CheckpointConf(**checkpoint).infer_missing() + self.max_epochs = max_epochs + self.mode = mode + self.val_epoch_freq = val_epoch_freq + self.optim_conf = OptimConf(**optim) if optim is not None else OptimConf() + self.meters_conf = meters + self.loss_conf = loss + self.gradient_accumulation_steps = gradient_accumulation_steps + distributed = DistributedConf(**distributed or {}) + cuda = CudaConf(**cuda or {}) + self.where = 0.0 + + self.skip_first_val = skip_first_val + self.skip_saving_ckpts = skip_saving_ckpts + self.empty_gpu_mem_cache_after_eval = empty_gpu_mem_cache_after_eval + + self._infer_distributed_backend_if_none(distributed, accelerator) + + self._setup_device(accelerator) + + self._setup_torch_dist_and_backend(cuda, distributed) + + makedir(self.logging_conf.log_dir) + setup_logging( + __name__, + output_dir=self.logging_conf.log_dir, + rank=self.rank, + log_level_primary=self.logging_conf.log_level_primary, + log_level_secondary=self.logging_conf.log_level_secondary, + ) + + set_seeds(seed_value, self.max_epochs, self.distributed_rank) + log_env_variables() + + assert ( + 
is_dist_avail_and_initialized() + ), "Torch distributed needs to be initialized before calling the trainer." + + self._setup_components() # Except Optimizer everything is setup here. + self._move_to_device() + self._construct_optimizers() + self._setup_dataloaders() + + self.time_elapsed_meter = DurationMeter("Time Elapsed", self.device, ":.2f") + + if self.checkpoint_conf.resume_from is not None: + assert os.path.exists( + self.checkpoint_conf.resume_from + ), f"The 'resume_from' checkpoint {self.checkpoint_conf.resume_from} does not exist!" + dst = os.path.join(self.checkpoint_conf.save_dir, "checkpoint.pt") + if self.distributed_rank == 0 and not os.path.exists(dst): + # Copy the "resume_from" checkpoint to the checkpoint folder + # if there is not a checkpoint to resume from already there + makedir(self.checkpoint_conf.save_dir) + g_pathmgr.copy(self.checkpoint_conf.resume_from, dst) + barrier() + + self.load_checkpoint() + self._setup_ddp_distributed_training(distributed, accelerator) + barrier() + + def _setup_timers(self): + """ + Initializes counters for elapsed time and eta. + """ + self.start_time = time.time() + self.ckpt_time_elapsed = 0 + self.est_epoch_time = dict.fromkeys([Phase.TRAIN, Phase.VAL], 0) + + def _get_meters(self, phase_filters=None): + if self.meters is None: + return {} + meters = {} + for phase, phase_meters in self.meters.items(): + if phase_filters is not None and phase not in phase_filters: + continue + for key, key_meters in phase_meters.items(): + if key_meters is None: + continue + for name, meter in key_meters.items(): + meters[f"{phase}_{key}/{name}"] = meter + return meters + + def _infer_distributed_backend_if_none(self, distributed_conf, accelerator): + if distributed_conf.backend is None: + distributed_conf.backend = "nccl" if accelerator == "cuda" else "gloo" + + def _setup_env_variables(self, env_variables_conf) -> None: + if env_variables_conf is not None: + for variable_name, value in env_variables_conf.items(): + os.environ[variable_name] = value + + def _setup_torch_dist_and_backend(self, cuda_conf, distributed_conf) -> None: + if torch.cuda.is_available(): + torch.backends.cudnn.deterministic = cuda_conf.cudnn_deterministic + torch.backends.cudnn.benchmark = cuda_conf.cudnn_benchmark + torch.backends.cuda.matmul.allow_tf32 = ( + cuda_conf.matmul_allow_tf32 + if cuda_conf.matmul_allow_tf32 is not None + else cuda_conf.allow_tf32 + ) + torch.backends.cudnn.allow_tf32 = ( + cuda_conf.cudnn_allow_tf32 + if cuda_conf.cudnn_allow_tf32 is not None + else cuda_conf.allow_tf32 + ) + + self.rank = setup_distributed_backend( + distributed_conf.backend, distributed_conf.timeout_mins + ) + + def _setup_device(self, accelerator): + self.local_rank, self.distributed_rank = get_machine_local_and_dist_rank() + if accelerator == "cuda": + self.device = torch.device("cuda", self.local_rank) + torch.cuda.set_device(self.local_rank) + elif accelerator == "cpu": + self.device = torch.device("cpu") + else: + raise ValueError(f"Unsupported accelerator: {accelerator}") + + def _setup_ddp_distributed_training(self, distributed_conf, accelerator): + assert isinstance(self.model, torch.nn.Module) + + self.model = nn.parallel.DistributedDataParallel( + self.model, + device_ids=[self.local_rank] if accelerator == "cuda" else [], + find_unused_parameters=distributed_conf.find_unused_parameters, + gradient_as_bucket_view=distributed_conf.gradient_as_bucket_view, + static_graph=distributed_conf.static_graph, + ) + if distributed_conf.comms_dtype is not None: # noqa + from 
torch.distributed.algorithms import ddp_comm_hooks + + amp_type = get_amp_type(distributed_conf.comms_dtype) + if amp_type == torch.bfloat16: + hook = ddp_comm_hooks.default_hooks.bf16_compress_hook + logging.info("Enabling bfloat16 grad communication") + else: + hook = ddp_comm_hooks.default_hooks.fp16_compress_hook + logging.info("Enabling fp16 grad communication") + process_group = None + self.model.register_comm_hook(process_group, hook) + + def _move_to_device(self): + logging.info( + f"Moving components to device {self.device} and local rank {self.local_rank}." + ) + + self.model.to(self.device) + + logging.info( + f"Done moving components to device {self.device} and local rank {self.local_rank}." + ) + + def save_checkpoint(self, epoch, checkpoint_names=None): + if self.skip_saving_ckpts: + logging.info( + "skip_saving_ckpts is set to True. So, no checkpoints have been saved." + ) + return + checkpoint_folder = self.checkpoint_conf.save_dir + makedir(checkpoint_folder) + if checkpoint_names is None: + checkpoint_names = ["checkpoint"] + if ( + self.checkpoint_conf.save_freq > 0 + and (int(epoch) % self.checkpoint_conf.save_freq == 0) + ) or int(epoch) in self.checkpoint_conf.save_list: + checkpoint_names.append(f"checkpoint_{int(epoch)}") + + checkpoint_paths = [] + for ckpt_name in checkpoint_names: + checkpoint_paths.append(os.path.join(checkpoint_folder, f"{ckpt_name}.pt")) + + state_dict = unwrap_ddp_if_wrapped(self.model).state_dict() + state_dict = exclude_params_matching_unix_pattern( + patterns=self.checkpoint_conf.skip_saving_parameters, state_dict=state_dict + ) + + checkpoint = { + "model": state_dict, + "optimizer": self.optim.optimizer.state_dict(), + "epoch": epoch, + "loss": self.loss.state_dict(), + "steps": self.steps, + "time_elapsed": self.time_elapsed_meter.val, + "best_meter_values": self.best_meter_values, + } + if self.optim_conf.amp.enabled: + checkpoint["scaler"] = self.scaler.state_dict() + + # DDP checkpoints are only saved on rank 0 (all workers are identical) + if self.distributed_rank != 0: + return + + for checkpoint_path in checkpoint_paths: + self._save_checkpoint(checkpoint, checkpoint_path) + + def _save_checkpoint(self, checkpoint, checkpoint_path): + """ + Save a checkpoint while guarding against the job being killed in the middle + of checkpoint saving (which corrupts the checkpoint file and ruins the + entire training since usually only the last checkpoint is kept per run). + + We first save the new checkpoint to a temp file (with a '.tmp' suffix), and + and move it to overwrite the old checkpoint_path. 
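As a standalone illustration of this save-to-temp-then-rename pattern (the method itself goes through `g_pathmgr`, shown below; this helper is only a plain-filesystem sketch):

import os
import torch

def atomic_torch_save(obj, checkpoint_path: str) -> None:
    # Write the complete file under a temporary name first...
    tmp_path = f"{checkpoint_path}.tmp"
    with open(tmp_path, "wb") as f:
        torch.save(obj, f)
    # ...then swap it into place; os.replace is atomic on the same filesystem,
    # so a reader never observes a half-written checkpoint.
    os.replace(tmp_path, checkpoint_path)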
+ """ + checkpoint_path_tmp = f"{checkpoint_path}.tmp" + with g_pathmgr.open(checkpoint_path_tmp, "wb") as f: + torch.save(checkpoint, f) + # after torch.save is completed, replace the old checkpoint with the new one + if g_pathmgr.exists(checkpoint_path): + # remove the old checkpoint_path file first (otherwise g_pathmgr.mv fails) + g_pathmgr.rm(checkpoint_path) + success = g_pathmgr.mv(checkpoint_path_tmp, checkpoint_path) + assert success + + def load_checkpoint(self): + ckpt_path = get_resume_checkpoint(self.checkpoint_conf.save_dir) + if ckpt_path is None: + self._init_model_state() + else: + if self.checkpoint_conf.initialize_after_preemption: + self._call_model_initializer() + self._load_resuming_checkpoint(ckpt_path) + + def _init_model_state(self): + # Checking that parameters that won't be saved are indeed frozen + # We do this check here before even saving the model to catch errors + # are early as possible and not at the end of the first epoch + assert_skipped_parameters_are_frozen( + patterns=self.checkpoint_conf.skip_saving_parameters, + model=self.model, + ) + + # Checking that parameters that won't be saved are initialized from + # within the model definition, unless `initialize_after_preemption` + # is explicitly set to `True`. If not, this is a bug, and after + # preemption, the `skip_saving_parameters` will have random values + allow_init_skip_parameters = self.checkpoint_conf.initialize_after_preemption + with with_check_parameter_frozen( + patterns=self.checkpoint_conf.skip_saving_parameters, + model=self.model, + disabled=allow_init_skip_parameters, + ): + self._call_model_initializer() + + def _call_model_initializer(self): + model_weight_initializer = instantiate( + self.checkpoint_conf.model_weight_initializer + ) + if model_weight_initializer is not None: + logging.info( + f"Loading pretrained checkpoint from {self.checkpoint_conf.model_weight_initializer}" + ) + self.model = model_weight_initializer(model=self.model) + + def _load_resuming_checkpoint(self, ckpt_path: str): + logging.info(f"Resuming training from {ckpt_path}") + + with g_pathmgr.open(ckpt_path, "rb") as f: + checkpoint = torch.load(f, map_location="cpu") + load_state_dict_into_model( + model=self.model, + state_dict=checkpoint["model"], + ignore_missing_keys=self.checkpoint_conf.skip_saving_parameters, + ) + + self.optim.optimizer.load_state_dict(checkpoint["optimizer"]) + self.loss.load_state_dict(checkpoint["loss"], strict=True) + self.epoch = checkpoint["epoch"] + self.steps = checkpoint["steps"] + self.ckpt_time_elapsed = checkpoint.get("time_elapsed") + + if self.optim_conf.amp.enabled and "scaler" in checkpoint: + self.scaler.load_state_dict(checkpoint["scaler"]) + + self.best_meter_values = checkpoint.get("best_meter_values", {}) + + if "train_dataset" in checkpoint and self.train_dataset is not None: + self.train_dataset.load_checkpoint_state(checkpoint["train_dataset"]) + + def is_intermediate_val_epoch(self, epoch): + skip_epoch = self.skip_first_val and epoch == 0 + return ( + epoch % self.val_epoch_freq == 0 + and epoch < self.max_epochs - 1 + and not skip_epoch + ) + + def _find_loss(self, key: str): + if key in self.loss: + return self.loss[key] + + assert key != "all", "Loss must be specified for key='all'" + assert ( + "default" in self.loss + ), f"Key {key} not found in losss, and no default provided" + return self.loss["default"] + + def _find_meter(self, phase: str, key: str): + if key in self.meters[phase]: + return self.meters[phase][key] + + for cand_key, meter in 
self.meters[phase].items(): + if fnmatch.fnmatch(key, cand_key): + return meter + return None + + def _step( + self, + batch: BatchedDatapoint, + model: nn.Module, + phase: str, + ): + key, batch = batch.popitem() + batch = copy_data_to_device(batch, self.device, non_blocking=True) + + find_stages = model(batch) + find_targets = [ + unwrap_ddp_if_wrapped(model).back_convert(x) for x in batch.find_targets + ] + batch_size = len(batch.img_batch) + loss = self._find_loss(key)(find_stages, find_targets) + + loss_str = f"Losses/{phase}_{key}_loss" + + loss_log_str = os.path.join("Step_Losses", loss_str) + + # loss contains multiple sub-components we wish to log + step_losses = {} + if isinstance(loss, dict): + step_losses.update( + {f"Losses/{phase}_{key}_{k}": v for k, v in loss.items()} + ) + loss = self._log_loss_detailed_and_return_core_loss( + loss, loss_log_str, self.steps[phase] + ) + + if self.steps[phase] % self.logging_conf.log_scalar_frequency == 0: + self.logger.log( + loss_log_str, + loss, + self.steps[phase], + ) + + self.steps[phase] += 1 + + ret_tuple = {loss_str: loss}, batch_size, step_losses + + if phase not in self.meters: + return ret_tuple + + meters_dict = self._find_meter(phase, key) + if meters_dict is None: + return ret_tuple + if meters_dict is not None: + for _, meter in meters_dict.items(): + meter.update( + find_stages=find_stages, + find_metadatas=batch.find_metadatas, + model=model, + batch=batch, + key=key, + ) + # Cleanup memory + if isinstance(find_stages, SAM3Output): + for fs in find_stages: + for k in list(fs.keys()): + del fs[k] + + return ret_tuple + + def run(self): + assert self.mode in ["train", "train_only", "val"] + if self.mode == "train": + if self.epoch > 0: + logging.info(f"Resuming training from epoch: {self.epoch}") + # resuming from a checkpoint + if self.is_intermediate_val_epoch(self.epoch - 1): + logging.info("Running previous val epoch") + self.epoch -= 1 + self.run_val() + self.epoch += 1 + self.run_train() + self.run_val() + elif self.mode == "val": + self.run_val() + elif self.mode == "train_only": + self.run_train() + + def _setup_dataloaders(self): + self.train_dataset = None + self.val_dataset = None + + if self.mode in ["train", "val"]: + self.val_dataset = instantiate(self.data_conf.get(Phase.VAL, None)) + + if self.mode in ["train", "train_only"]: + self.train_dataset = instantiate(self.data_conf.train) + + def run_train(self): + while self.epoch < self.max_epochs: + dataloader = self.train_dataset.get_loader(epoch=int(self.epoch)) + barrier() + outs = self.train_epoch(dataloader) + self.logger.log_dict(outs, self.epoch) # Logged only on rank 0 + + # log train to text file. 
+ if self.distributed_rank == 0: + with g_pathmgr.open( + os.path.join(self.logging_conf.log_dir, "train_stats.json"), + "a", + ) as f: + f.write(json.dumps(outs) + "\n") + + # Save checkpoint before validating + self.save_checkpoint(self.epoch + 1) + + del dataloader + gc.collect() + + # Run val, not running on last epoch since will run after the + # loop anyway + if self.is_intermediate_val_epoch(self.epoch): + self.run_val() + if torch.cuda.is_available() and self.empty_gpu_mem_cache_after_eval: + # release memory buffers held by the model during eval (which typically + # involves a lot more frames in video grounding that during training) + torch.cuda.empty_cache() + + if self.distributed_rank == 0: + self.best_meter_values.update(self._get_trainer_state("train")) + with g_pathmgr.open( + os.path.join(self.logging_conf.log_dir, "best_stats.json"), + "a", + ) as f: + f.write(json.dumps(self.best_meter_values) + "\n") + + self.epoch += 1 + # epoch was incremented in the loop but the val step runs out of the loop + self.epoch -= 1 + + def run_val(self): + if not self.val_dataset: + return + + dataloader = self.val_dataset.get_loader(epoch=int(self.epoch)) + outs = self.val_epoch(dataloader, phase=Phase.VAL) + del dataloader + gc.collect() + self.logger.log_dict(outs, self.epoch) # Logged only on rank 0 + + if self.distributed_rank == 0: + with g_pathmgr.open( + os.path.join(self.logging_conf.log_dir, "val_stats.json"), + "a", + ) as f: + f.write(json.dumps(outs) + "\n") + + def val_epoch(self, val_loader, phase): + batch_time = AverageMeter("Batch Time", self.device, ":.2f") + data_time = AverageMeter("Data Time", self.device, ":.2f") + mem = MemMeter("Mem (GB)", self.device, ":.2f") + + iters_per_epoch = len(val_loader) + + curr_phases = [phase] + curr_models = [self.model] + + loss_names = [] + for p in curr_phases: + for key in self.loss.keys(): + loss_names.append(f"Losses/{p}_{key}_loss") + + loss_mts = OrderedDict( + [(name, AverageMeter(name, self.device, ":.2e")) for name in loss_names] + ) + extra_loss_mts = {} + + for model in curr_models: + model.eval() + if hasattr(unwrap_ddp_if_wrapped(model), "on_validation_epoch_start"): + unwrap_ddp_if_wrapped(model).on_validation_epoch_start() + + progress = ProgressMeter( + iters_per_epoch, + [batch_time, data_time, mem, self.time_elapsed_meter, *loss_mts.values()], + self._get_meters(curr_phases), + prefix="Val Epoch: [{}]".format(self.epoch), + ) + + end = time.time() + + for data_iter, batch in enumerate(val_loader): + # measure data loading time + data_time.update(time.time() - end) + + # batch = batch.to(self.device, non_blocking=True) + + # compute output + with torch.no_grad(): + with torch.amp.autocast( + device_type="cuda", + enabled=(self.optim_conf.amp.enabled if self.optim_conf else False), + dtype=( + get_amp_type(self.optim_conf.amp.amp_dtype) + if self.optim_conf + else None + ), + ): + for phase, model in zip(curr_phases, curr_models): + loss_dict, batch_size, extra_losses = self._step( + batch, + model, + phase, + ) + + assert len(loss_dict) == 1 + loss_key, loss = loss_dict.popitem() + + if loss_key in loss_mts: + loss_mts[loss_key].update(loss.item(), batch_size) + + for k, v in extra_losses.items(): + if k not in extra_loss_mts: + extra_loss_mts[k] = AverageMeter(k, self.device, ":.2e") + extra_loss_mts[k].update(v.item(), batch_size) + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + self.time_elapsed_meter.update( + time.time() - self.start_time + self.ckpt_time_elapsed + ) + + 
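Both `train_stats.json` and `val_stats.json` above are appended to as JSON Lines (one dict per epoch), so inspecting them offline can be as simple as the sketch below; the path is illustrative:

import json

# Hypothetical experiment directory; in practice this is <experiment_log_dir>/train_stats.json.
with open("sam3_logs/my_experiment/train_stats.json") as f:
    rows = [json.loads(line) for line in f if line.strip()]
for row in rows:
    print(row.get("Trainer/epoch"), row.get("Trainer/where"))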
if torch.cuda.is_available(): + mem.update(reset_peak_usage=True) + + if data_iter % self.logging_conf.log_freq == 0: + progress.display(data_iter) + + if data_iter % self.logging_conf.log_scalar_frequency == 0: + # Log progress meters. + for progress_meter in progress.meters: + self.logger.log( + os.path.join("Step_Stats", phase, progress_meter.name), + progress_meter.val, + self.steps[Phase.VAL], + ) + + if data_iter % 10 == 0: + dist.barrier() + + self.est_epoch_time[phase] = batch_time.avg * iters_per_epoch + self._log_timers(phase) + for model in curr_models: + if hasattr(unwrap_ddp_if_wrapped(model), "on_validation_epoch_end"): + unwrap_ddp_if_wrapped(model).on_validation_epoch_end() + + out_dict = self._log_meters_and_save_best_ckpts(curr_phases) + + for k, v in loss_mts.items(): + out_dict[k] = v.avg + for k, v in extra_loss_mts.items(): + out_dict[k] = v.avg + + for phase in curr_phases: + out_dict.update(self._get_trainer_state(phase)) + self._reset_meters(curr_phases) + logging.info(f"Meters: {out_dict}") + return out_dict + + def _get_trainer_state(self, phase): + return { + "Trainer/where": self.where, + "Trainer/epoch": self.epoch, + f"Trainer/steps_{phase}": self.steps[phase], + } + + def train_epoch(self, train_loader): + # Init stat meters + batch_time_meter = AverageMeter("Batch Time", self.device, ":.2f") + data_time_meter = AverageMeter("Data Time", self.device, ":.2f") + mem_meter = MemMeter("Mem (GB)", self.device, ":.2f") + data_times = [] + phase = Phase.TRAIN + + iters_per_epoch = len(train_loader) + + loss_names = [] + for batch_key in self.loss.keys(): + loss_names.append(f"Losses/{phase}_{batch_key}_loss") + + loss_mts = OrderedDict( + [(name, AverageMeter(name, self.device, ":.2e")) for name in loss_names] + ) + extra_loss_mts = {} + + progress = ProgressMeter( + iters_per_epoch, + [ + batch_time_meter, + data_time_meter, + mem_meter, + self.time_elapsed_meter, + *loss_mts.values(), + ], + self._get_meters([phase]), + prefix="Train Epoch: [{}]".format(self.epoch), + ) + + # Model training loop + self.model.train() + end = time.time() + + for data_iter, batch in enumerate(train_loader): + # measure data loading time + data_time_meter.update(time.time() - end) + data_times.append(data_time_meter.val) + # batch = batch.to( + # self.device, non_blocking=True + # ) # move tensors in a tensorclass + + try: + self._run_step(batch, phase, loss_mts, extra_loss_mts) + + # compute gradient and do optim step + exact_epoch = self.epoch + float(data_iter) / iters_per_epoch + self.where = float(exact_epoch) / self.max_epochs + assert self.where <= 1 + self.EPSILON + if self.where < 1.0: + self.optim.step_schedulers( + self.where, step=int(exact_epoch * iters_per_epoch) + ) + else: + logging.warning( + f"Skipping scheduler update since the training is at the end, i.e, {self.where} of [0,1]." 
+ ) + + # Log schedulers + if data_iter % self.logging_conf.log_scalar_frequency == 0: + for j, param_group in enumerate(self.optim.optimizer.param_groups): + for option in self.optim.schedulers[j]: + optim_prefix = ( + "" + f"{j}_" + if len(self.optim.optimizer.param_groups) > 1 + else "" + ) + self.logger.log( + os.path.join("Optim", f"{optim_prefix}", option), + param_group[option], + self.steps[phase], + ) + + # Clipping gradients and detecting diverging gradients + if self.gradient_clipper is not None: + self.scaler.unscale_(self.optim.optimizer) + self.gradient_clipper(model=self.model) + + if self.gradient_logger is not None: + self.gradient_logger( + self.model, rank=self.distributed_rank, where=self.where + ) + + # Optimizer step: the scaler will make sure gradients are not + # applied if the gradients are infinite + self.scaler.step(self.optim.optimizer) + self.scaler.update() + + # measure elapsed time + batch_time_meter.update(time.time() - end) + end = time.time() + + self.time_elapsed_meter.update( + time.time() - self.start_time + self.ckpt_time_elapsed + ) + + mem_meter.update(reset_peak_usage=True) + if data_iter % self.logging_conf.log_freq == 0: + progress.display(data_iter) + + if data_iter % self.logging_conf.log_scalar_frequency == 0: + # Log progress meters. + for progress_meter in progress.meters: + self.logger.log( + os.path.join("Step_Stats", phase, progress_meter.name), + progress_meter.val, + self.steps[phase], + ) + + # Catching NaN/Inf errors in the loss + except FloatingPointError as e: + raise e + + self.est_epoch_time[Phase.TRAIN] = batch_time_meter.avg * iters_per_epoch + self._log_timers(Phase.TRAIN) + self._log_sync_data_times(Phase.TRAIN, data_times) + + out_dict = self._log_meters_and_save_best_ckpts([Phase.TRAIN]) + + for k, v in loss_mts.items(): + out_dict[k] = v.avg + for k, v in extra_loss_mts.items(): + out_dict[k] = v.avg + out_dict.update(self._get_trainer_state(phase)) + logging.info(f"Losses and meters: {out_dict}") + self._reset_meters([phase]) + return out_dict + + def _log_sync_data_times(self, phase, data_times): + data_times = all_reduce_max(torch.tensor(data_times)).tolist() + steps = range(self.steps[phase] - len(data_times), self.steps[phase]) + for step, data_time in zip(steps, data_times): + if step % self.logging_conf.log_scalar_frequency == 0: + self.logger.log( + os.path.join("Step_Stats", phase, "Data Time Synced"), + data_time, + step, + ) + + def _run_step( + self, + batch: BatchedDatapoint, + phase: str, + loss_mts: Dict[str, AverageMeter], + extra_loss_mts: Dict[str, AverageMeter], + raise_on_error: bool = True, + ): + """ + Run the forward / backward + """ + + # it's important to set grads to None, especially with Adam since 0 + # grads will also update a model even if the step doesn't produce + # gradients + self.optim.zero_grad(set_to_none=True) + + if self.gradient_accumulation_steps > 1: + assert isinstance( + batch, list + ), f"Expected a list of batches, got {type(batch)}" + assert ( + len(batch) == self.gradient_accumulation_steps + ), f"Expected {self.gradient_accumulation_steps} batches, got {len(batch)}" + accum_steps = len(batch) + else: + accum_steps = 1 + batch = [batch] + + for i, chunked_batch in enumerate(batch): + ddp_context = ( + self.model.no_sync() + if i < accum_steps - 1 + else contextlib.nullcontext() + ) + with ddp_context: + with torch.amp.autocast( + device_type="cuda", + enabled=self.optim_conf.amp.enabled, + dtype=get_amp_type(self.optim_conf.amp.amp_dtype), + ): + loss_dict, batch_size, 
extra_losses = self._step( + chunked_batch, + self.model, + phase, + ) + + assert len(loss_dict) == 1 + loss_key, loss = loss_dict.popitem() + + if not math.isfinite(loss.item()): + error_msg = f"Loss is {loss.item()}, attempting to stop training" + logging.error(error_msg) + if raise_on_error: + raise FloatingPointError(error_msg) + else: + return + + self.scaler.scale(loss).backward() + loss_mts[loss_key].update(loss.item(), batch_size) + for extra_loss_key, extra_loss in extra_losses.items(): + if extra_loss_key not in extra_loss_mts: + extra_loss_mts[extra_loss_key] = AverageMeter( + extra_loss_key, self.device, ":.2e" + ) + extra_loss_mts[extra_loss_key].update(extra_loss.item(), batch_size) + + def _log_meters_and_save_best_ckpts(self, phases: List[str]): + logging.info("Synchronizing meters") + out_dict = {} + checkpoint_save_keys = [] + for key, meter in self._get_meters(phases).items(): + meter_output = meter.compute_synced() + is_better_check = getattr(meter, "is_better", None) + + for meter_subkey, meter_value in meter_output.items(): + out_dict[os.path.join("Meters_train", key, meter_subkey)] = meter_value + + if is_better_check is None: + continue + + tracked_meter_key = os.path.join(key, meter_subkey) + if tracked_meter_key not in self.best_meter_values or is_better_check( + meter_value, + self.best_meter_values[tracked_meter_key], + ): + self.best_meter_values[tracked_meter_key] = meter_value + + if ( + self.checkpoint_conf.save_best_meters is not None + and key in self.checkpoint_conf.save_best_meters + ): + checkpoint_save_keys.append(tracked_meter_key.replace("/", "_")) + + if len(checkpoint_save_keys) > 0: + self.save_checkpoint(self.epoch + 1, checkpoint_save_keys) + + return out_dict + + def _log_timers(self, phase): + time_remaining = 0 + epochs_remaining = self.max_epochs - self.epoch - 1 + val_epochs_remaining = sum( + n % self.val_epoch_freq == 0 for n in range(self.epoch, self.max_epochs) + ) + + # Adding the guaranteed val run at the end if val_epoch_freq doesn't coincide with + # the end epoch. + if (self.max_epochs - 1) % self.val_epoch_freq != 0: + val_epochs_remaining += 1 + + # Remove the current val run from estimate + if phase == Phase.VAL: + val_epochs_remaining -= 1 + + time_remaining += ( + epochs_remaining * self.est_epoch_time[Phase.TRAIN] + + val_epochs_remaining * self.est_epoch_time[Phase.VAL] + ) + + self.logger.log( + os.path.join("Step_Stats", phase, self.time_elapsed_meter.name), + self.time_elapsed_meter.val, + self.steps[phase], + ) + + logging.info(f"Estimated time remaining: {human_readable_time(time_remaining)}") + + def _reset_meters(self, phases: str) -> None: + for meter in self._get_meters(phases).values(): + meter.reset() + + def _check_val_key_match(self, val_keys, phase): + if val_keys is not None: + # Check if there are any duplicates + assert len(val_keys) == len( + set(val_keys) + ), f"Duplicate keys in val datasets, keys: {val_keys}" + + # Check that the keys match the meter keys + if self.meters_conf is not None and phase in self.meters_conf: + assert set(val_keys) == set(self.meters_conf[phase].keys()), ( + f"Keys in val datasets do not match the keys in meters." 
+ f"\nMissing in meters: {set(val_keys) - set(self.meters_conf[phase].keys())}" + f"\nMissing in val datasets: {set(self.meters_conf[phase].keys()) - set(val_keys)}" + ) + + if self.loss_conf is not None: + loss_keys = set(self.loss_conf.keys()) - set(["all"]) + if "default" not in loss_keys: + for k in val_keys: + assert ( + k in loss_keys + ), f"Error: key {k} is not defined in the losses, and no default is set" + + def _setup_components(self): + # Get the keys for all the val datasets, if any + val_phase = Phase.VAL + val_keys = None + if self.data_conf.get(val_phase, None) is not None: + val_keys = collect_dict_keys(self.data_conf[val_phase]) + # Additional checks on the sanity of the config for val datasets + self._check_val_key_match(val_keys, phase=val_phase) + + logging.info("Setting up components: Model, loss, optim, meters etc.") + self.epoch = 0 + self.steps = {Phase.TRAIN: 0, Phase.VAL: 0} + + self.logger = Logger(self.logging_conf) + + self.model = instantiate(self.model_conf, _convert_="all") + print_model_summary(self.model) + + self.loss = None + if self.loss_conf: + self.loss = { + key: el # wrap_base_loss(el) + for (key, el) in instantiate(self.loss_conf, _convert_="all").items() + } + self.loss = nn.ModuleDict(self.loss) + + self.meters = {} + self.best_meter_values = {} + if self.meters_conf: + self.meters = instantiate(self.meters_conf, _convert_="all") + + self.scaler = torch.amp.GradScaler( + self.device, + enabled=self.optim_conf.amp.enabled if self.optim_conf else False, + ) + + self.gradient_clipper = ( + instantiate(self.optim_conf.gradient_clip) if self.optim_conf else None + ) + self.gradient_logger = ( + instantiate(self.optim_conf.gradient_logger) if self.optim_conf else None + ) + + logging.info("Finished setting up components: Model, loss, optim, meters etc.") + + def _construct_optimizers(self): + self.optim = construct_optimizer( + self.model, + self.optim_conf.optimizer, + self.optim_conf.options, + self.optim_conf.param_group_modifiers, + ) + + def _log_loss_detailed_and_return_core_loss(self, loss, loss_str, step): + core_loss = loss.pop(CORE_LOSS_KEY) + if step % self.logging_conf.log_scalar_frequency == 0: + for k in loss: + log_str = os.path.join(loss_str, k) + self.logger.log(log_str, loss[k], step) + return core_loss + + +def print_model_summary(model: torch.nn.Module, log_dir: str = ""): + """ + Prints the model and the number of parameters in the model. + # Multiple packages provide this info in a nice table format + # However, they need us to provide an `input` (as they also write down the output sizes) + # Our models are complex, and a single input is restrictive. 
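One convention worth spelling out: `_step()` accepts either a plain loss tensor or a dict of sub-losses, and `_log_loss_detailed_and_return_core_loss()` above pops the `CORE_LOSS_KEY` entry as the value that is actually backpropagated while logging the remaining entries. A hedged sketch of a criterion following that convention (the sub-loss names are made up):

import torch

CORE_LOSS_KEY = "core_loss"  # same constant as defined near the top of trainer.py

def toy_criterion(outputs, targets):
    # Hypothetical sub-losses; the real ones come from the instantiated loss modules.
    loss_bbox = torch.tensor(0.7, requires_grad=True)
    loss_cls = torch.tensor(0.3, requires_grad=True)
    return {
        "loss_bbox": loss_bbox,               # logged as sub-losses alongside the core loss
        "loss_cls": loss_cls,
        CORE_LOSS_KEY: loss_bbox + loss_cls,  # the only term that reaches .backward()
    }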
+ # https://github.com/sksq96/pytorch-summary + # https://github.com/nmhkahn/torchsummaryX + """ + if get_rank() != 0: + return + param_kwargs = {} + trainable_parameters = sum( + p.numel() for p in model.parameters(**param_kwargs) if p.requires_grad + ) + total_parameters = sum(p.numel() for p in model.parameters(**param_kwargs)) + non_trainable_parameters = total_parameters - trainable_parameters + logging.info("==" * 10) + logging.info(f"Summary for model {type(model)}") + logging.info(f"Model is {model}") + logging.info(f"\tTotal parameters {get_human_readable_count(total_parameters)}") + logging.info( + f"\tTrainable parameters {get_human_readable_count(trainable_parameters)}" + ) + logging.info( + f"\tNon-Trainable parameters {get_human_readable_count(non_trainable_parameters)}" + ) + logging.info("==" * 10) + + if log_dir: + output_fpath = os.path.join(log_dir, "model.txt") + with g_pathmgr.open(output_fpath, "w") as f: + print(model, file=f) + + +PARAMETER_NUM_UNITS = [" ", "K", "M", "B", "T"] + + +def get_human_readable_count(number: int) -> str: + """ + Abbreviates an integer number with K, M, B, T for thousands, millions, + billions and trillions, respectively. + Examples: + >>> get_human_readable_count(123) + '123 ' + >>> get_human_readable_count(1234) # (one thousand) + '1.2 K' + >>> get_human_readable_count(2e6) # (two million) + '2.0 M' + >>> get_human_readable_count(3e9) # (three billion) + '3.0 B' + >>> get_human_readable_count(4e14) # (four hundred trillion) + '400 T' + >>> get_human_readable_count(5e15) # (more than trillion) + '5,000 T' + Args: + number: a positive integer number + Return: + A string formatted according to the pattern described above. + """ + assert number >= 0 + labels = PARAMETER_NUM_UNITS + num_digits = int(np.floor(np.log10(number)) + 1 if number > 0 else 1) + num_groups = int(np.ceil(num_digits / 3)) + num_groups = min(num_groups, len(labels)) # don't abbreviate beyond trillions + shift = -3 * (num_groups - 1) + number = number * (10**shift) + index = num_groups - 1 + if index < 1 or number >= 100: + return f"{int(number):,d} {labels[index]}" + else: + return f"{number:,.1f} {labels[index]}" diff --git a/detect_tools/sam3/sam3/train/transforms/__init__.py b/detect_tools/sam3/sam3/train/transforms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..46d37d2aaef52ec7de01999516a2b8c3e1fa4986 --- /dev/null +++ b/detect_tools/sam3/sam3/train/transforms/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved diff --git a/detect_tools/sam3/sam3/train/transforms/basic.py b/detect_tools/sam3/sam3/train/transforms/basic.py new file mode 100644 index 0000000000000000000000000000000000000000..3e8cf1f9da3ebe9da53bad8987d05232735c5f67 --- /dev/null +++ b/detect_tools/sam3/sam3/train/transforms/basic.py @@ -0,0 +1,455 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +""" +Transforms and data augmentation for both image + bbox. +""" + +import math +import random +from typing import Iterable + +import PIL +import torch +import torchvision.transforms as T +import torchvision.transforms.functional as F + +from sam3.model.box_ops import box_xyxy_to_cxcywh +from sam3.model.data_misc import interpolate + + +def crop(image, target, region): + cropped_image = F.crop(image, *region) + + target = target.copy() + i, j, h, w = region + + # should we do something wrt the original size? 
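    # Worked example (illustrative numbers) of the box arithmetic below: with
    # region = (i, j, h, w) = (10, 20, 100, 200), the crop window's top-left corner is
    # (x=j=20, y=i=10). An absolute XYXY box [50, 30, 300, 80] is first shifted by
    # [-j, -i, -j, -i] to [30, 20, 280, 70], then clamped element-wise to the crop
    # size [w, h] = [200, 100], giving [30, 20, 200, 70]; boxes whose clamped width
    # or height collapses to zero are dropped later by the `keep` mask.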
+ target["size"] = torch.tensor([h, w]) + + fields = ["labels", "area", "iscrowd", "positive_map"] + + if "boxes" in target: + boxes = target["boxes"] + max_size = torch.as_tensor([w, h], dtype=torch.float32) + cropped_boxes = boxes - torch.as_tensor([j, i, j, i], dtype=torch.float32) + cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size) + cropped_boxes = cropped_boxes.clamp(min=0) + area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1) + target["boxes"] = cropped_boxes.reshape(-1, 4) + target["area"] = area + fields.append("boxes") + + if "input_boxes" in target: + boxes = target["input_boxes"] + max_size = torch.as_tensor([w, h], dtype=torch.float32) + cropped_boxes = boxes - torch.as_tensor([j, i, j, i], dtype=torch.float32) + cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size) + cropped_boxes = cropped_boxes.clamp(min=0) + target["input_boxes"] = cropped_boxes.reshape(-1, 4) + + if "masks" in target: + # FIXME should we update the area here if there are no boxes? + target["masks"] = target["masks"][:, i : i + h, j : j + w] + fields.append("masks") + + # remove elements for which the boxes or masks that have zero area + if "boxes" in target or "masks" in target: + # favor boxes selection when defining which elements to keep + # this is compatible with previous implementation + if "boxes" in target: + cropped_boxes = target["boxes"].reshape(-1, 2, 2) + keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1) + else: + keep = target["masks"].flatten(1).any(1) + + for field in fields: + if field in target: + target[field] = target[field][keep] + + return cropped_image, target + + +def hflip(image, target): + flipped_image = F.hflip(image) + + w, h = image.size + + target = target.copy() + if "boxes" in target: + boxes = target["boxes"] + boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor( + [-1, 1, -1, 1] + ) + torch.as_tensor([w, 0, w, 0]) + target["boxes"] = boxes + + if "input_boxes" in target: + boxes = target["input_boxes"] + boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor( + [-1, 1, -1, 1] + ) + torch.as_tensor([w, 0, w, 0]) + target["input_boxes"] = boxes + + if "masks" in target: + target["masks"] = target["masks"].flip(-1) + + if "text_input" in target: + text_input = ( + target["text_input"] + .replace("left", "[TMP]") + .replace("right", "left") + .replace("[TMP]", "right") + ) + target["text_input"] = text_input + + return flipped_image, target + + +def resize(image, target, size, max_size=None, square=False): + # size can be min_size (scalar) or (w, h) tuple + + def get_size_with_aspect_ratio(image_size, size, max_size=None): + w, h = image_size + if max_size is not None: + min_original_size = float(min((w, h))) + max_original_size = float(max((w, h))) + if max_original_size / min_original_size * size > max_size: + size = int(round(max_size * min_original_size / max_original_size)) + + if (w <= h and w == size) or (h <= w and h == size): + return (h, w) + + if w < h: + ow = size + oh = int(size * h / w) + else: + oh = size + ow = int(size * w / h) + + return (oh, ow) + + def get_size(image_size, size, max_size=None): + if isinstance(size, (list, tuple)): + return size[::-1] + else: + return get_size_with_aspect_ratio(image_size, size, max_size) + + if square: + size = size, size + else: + size = get_size(image.size, size, max_size) + rescaled_image = F.resize(image, size) + + if target is None: + return rescaled_image, None + + ratios = tuple( + float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, 
image.size) + ) + ratio_width, ratio_height = ratios + + target = target.copy() + if "boxes" in target: + boxes = target["boxes"] + scaled_boxes = boxes * torch.as_tensor( + [ratio_width, ratio_height, ratio_width, ratio_height], dtype=torch.float32 + ) + target["boxes"] = scaled_boxes + if "input_boxes" in target: + boxes = target["input_boxes"] + scaled_boxes = boxes * torch.as_tensor( + [ratio_width, ratio_height, ratio_width, ratio_height], dtype=torch.float32 + ) + target["input_boxes"] = scaled_boxes + + if "area" in target: + area = target["area"] + scaled_area = area * (ratio_width * ratio_height) + target["area"] = scaled_area + + h, w = size + target["size"] = torch.tensor([h, w]) + + if "masks" in target: + target["masks"] = ( + interpolate(target["masks"][:, None].float(), size, mode="nearest")[:, 0] + > 0.5 + ) + + return rescaled_image, target + + +def pad(image, target, padding): + if len(padding) == 2: + # assumes that we only pad on the bottom right corners + padded_image = F.pad(image, (0, 0, padding[0], padding[1])) + else: + # left, top, right, bottom + padded_image = F.pad(image, (padding[0], padding[1], padding[2], padding[3])) + if target is None: + return padded_image, None + target = target.copy() + + w, h = padded_image.size + + # should we do something wrt the original size? + target["size"] = torch.tensor([h, w]) + if "boxes" in target and len(padding) == 4: + boxes = target["boxes"] + boxes = boxes + torch.as_tensor( + [padding[0], padding[1], padding[0], padding[1]], dtype=torch.float32 + ) + target["boxes"] = boxes + + if "input_boxes" in target and len(padding) == 4: + boxes = target["input_boxes"] + boxes = boxes + torch.as_tensor( + [padding[0], padding[1], padding[0], padding[1]], dtype=torch.float32 + ) + target["input_boxes"] = boxes + + if "masks" in target: + if len(padding) == 2: + target["masks"] = torch.nn.functional.pad( + target["masks"], (0, padding[0], 0, padding[1]) + ) + else: + target["masks"] = torch.nn.functional.pad( + target["masks"], (padding[0], padding[2], padding[1], padding[3]) + ) + return padded_image, target + + +class RandomCrop: + def __init__(self, size): + self.size = size + + def __call__(self, img, target): + region = T.RandomCrop.get_params(img, self.size) + return crop(img, target, region) + + +class RandomSizeCrop: + def __init__(self, min_size: int, max_size: int, respect_boxes: bool = False): + self.min_size = min_size + self.max_size = max_size + self.respect_boxes = respect_boxes # if True we can't crop a box out + + def __call__(self, img: PIL.Image.Image, target: dict): + init_boxes = len(target["boxes"]) + init_boxes_tensor = target["boxes"].clone() + if self.respect_boxes and init_boxes > 0: + minW, minH, maxW, maxH = ( + min(img.width, self.min_size), + min(img.width, self.min_size), + min(img.width, self.max_size), + min(img.height, self.max_size), + ) + minX, minY = ( + target["boxes"][:, 0].max().item() + 10.0, + target["boxes"][:, 1].max().item() + 10.0, + ) + minX = min(img.width, minX) + minY = min(img.height, minY) + maxX, maxY = ( + target["boxes"][:, 2].min().item() - 10, + target["boxes"][:, 3].min().item() - 10, + ) + maxX = max(0.0, maxX) + maxY = max(0.0, maxY) + minW = max(minW, minX - maxX) + minH = max(minH, minY - maxY) + w = random.uniform(minW, max(minW, maxW)) + h = random.uniform(minH, max(minH, maxH)) + if minX > maxX: + # i = random.uniform(max(0, minX - w + 1), max(maxX, max(0, minX - w + 1))) + i = random.uniform(max(0, minX - w), max(maxX, max(0, minX - w))) + else: + i = 
random.uniform( + max(0, minX - w + 1), max(maxX - 1, max(0, minX - w + 1)) + ) + if minY > maxY: + # j = random.uniform(max(0, minY - h + 1), max(maxY, max(0, minY - h + 1))) + j = random.uniform(max(0, minY - h), max(maxY, max(0, minY - h))) + else: + j = random.uniform( + max(0, minY - h + 1), max(maxY - 1, max(0, minY - h + 1)) + ) + result_img, result_target = crop(img, target, [j, i, h, w]) + assert ( + len(result_target["boxes"]) == init_boxes + ), f"img_w={img.width}\timg_h={img.height}\tminX={minX}\tminY={minY}\tmaxX={maxX}\tmaxY={maxY}\tminW={minW}\tminH={minH}\tmaxW={maxW}\tmaxH={maxH}\tw={w}\th={h}\ti={i}\tj={j}\tinit_boxes={init_boxes_tensor}\tresults={result_target['boxes']}" + + return result_img, result_target + else: + w = random.randint(self.min_size, min(img.width, self.max_size)) + h = random.randint(self.min_size, min(img.height, self.max_size)) + region = T.RandomCrop.get_params(img, (h, w)) + result_img, result_target = crop(img, target, region) + return result_img, result_target + + +class CenterCrop: + def __init__(self, size): + self.size = size + + def __call__(self, img, target): + image_width, image_height = img.size + crop_height, crop_width = self.size + crop_top = int(round((image_height - crop_height) / 2.0)) + crop_left = int(round((image_width - crop_width) / 2.0)) + return crop(img, target, (crop_top, crop_left, crop_height, crop_width)) + + +class RandomHorizontalFlip: + def __init__(self, p=0.5): + self.p = p + + def __call__(self, img, target): + if random.random() < self.p: + return hflip(img, target) + return img, target + + +class RandomResize: + def __init__(self, sizes, max_size=None, square=False): + if isinstance(sizes, int): + sizes = (sizes,) + assert isinstance(sizes, Iterable) + self.sizes = list(sizes) + self.max_size = max_size + self.square = square + + def __call__(self, img, target=None): + size = random.choice(self.sizes) + return resize(img, target, size, self.max_size, square=self.square) + + +class RandomPad: + def __init__(self, max_pad): + self.max_pad = max_pad + + def __call__(self, img, target): + pad_x = random.randint(0, self.max_pad) + pad_y = random.randint(0, self.max_pad) + return pad(img, target, (pad_x, pad_y)) + + +class PadToSize: + def __init__(self, size): + self.size = size + + def __call__(self, img, target): + w, h = img.size + pad_x = self.size - w + pad_y = self.size - h + assert pad_x >= 0 and pad_y >= 0 + pad_left = random.randint(0, pad_x) + pad_right = pad_x - pad_left + pad_top = random.randint(0, pad_y) + pad_bottom = pad_y - pad_top + return pad(img, target, (pad_left, pad_top, pad_right, pad_bottom)) + + +class Identity: + def __call__(self, img, target): + return img, target + + +class RandomSelect: + """ + Randomly selects between transforms1 and transforms2, + with probability p for transforms1 and (1 - p) for transforms2 + """ + + def __init__(self, transforms1=None, transforms2=None, p=0.5): + self.transforms1 = transforms1 or Identity() + self.transforms2 = transforms2 or Identity() + self.p = p + + def __call__(self, img, target): + if random.random() < self.p: + return self.transforms1(img, target) + return self.transforms2(img, target) + + +class ToTensor: + def __call__(self, img, target): + return F.to_tensor(img), target + + +class RandomErasing: + def __init__(self, *args, **kwargs): + self.eraser = T.RandomErasing(*args, **kwargs) + + def __call__(self, img, target): + return self.eraser(img), target + + +class Normalize: + def __init__(self, mean, std): + self.mean = mean + self.std = 
std + + def __call__(self, image, target=None): + image = F.normalize(image, mean=self.mean, std=self.std) + if target is None: + return image, None + target = target.copy() + h, w = image.shape[-2:] + if "boxes" in target: + boxes = target["boxes"] + boxes = box_xyxy_to_cxcywh(boxes) + boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32) + target["boxes"] = boxes + if "input_boxes" in target: + boxes = target["input_boxes"] + boxes = box_xyxy_to_cxcywh(boxes) + boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32) + target["input_boxes"] = boxes + return image, target + + +class RemoveDifficult: + def __init__(self, enabled=False): + self.remove_difficult = enabled + + def __call__(self, image, target=None): + if target is None: + return image, None + target = target.copy() + keep = ~target["iscrowd"].to(torch.bool) | (not self.remove_difficult) + if "boxes" in target: + target["boxes"] = target["boxes"][keep] + target["labels"] = target["labels"][keep] + target["iscrowd"] = target["iscrowd"][keep] + return image, target + + +class Compose: + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, image, target): + for t in self.transforms: + image, target = t(image, target) + return image, target + + def __repr__(self): + format_string = self.__class__.__name__ + "(" + for t in self.transforms: + format_string += "\n" + format_string += " {0}".format(t) + format_string += "\n)" + return format_string + + +def get_random_resize_scales(size, min_size, rounded): + stride = 128 if rounded else 32 + min_size = int(stride * math.ceil(min_size / stride)) + scales = list(range(min_size, size + 1, stride)) + return scales + + +def get_random_resize_max_size(size, ratio=5 / 3): + max_size = round(ratio * size) + return max_size diff --git a/detect_tools/sam3/sam3/train/transforms/basic_for_api.py b/detect_tools/sam3/sam3/train/transforms/basic_for_api.py new file mode 100644 index 0000000000000000000000000000000000000000..e0ec2af4c482116e591f27c0e6676102469367c1 --- /dev/null +++ b/detect_tools/sam3/sam3/train/transforms/basic_for_api.py @@ -0,0 +1,1396 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +""" +Transforms and data augmentation for both image + bbox. +""" + +import logging + +import numbers +import random +from collections.abc import Sequence +from typing import Iterable + +import torch +import torchvision.transforms as T +import torchvision.transforms.functional as F +import torchvision.transforms.v2.functional as Fv2 + +from PIL import Image as PILImage + +from sam3.model.box_ops import box_xyxy_to_cxcywh, masks_to_boxes +from sam3.train.data.sam3_image_dataset import Datapoint +from torchvision.transforms import InterpolationMode + + +def crop( + datapoint, + index, + region, + v2=False, + check_validity=True, + check_input_validity=True, + recompute_box_from_mask=False, +): + if v2: + rtop, rleft, rheight, rwidth = (int(round(r)) for r in region) + datapoint.images[index].data = Fv2.crop( + datapoint.images[index].data, + top=rtop, + left=rleft, + height=rheight, + width=rwidth, + ) + else: + datapoint.images[index].data = F.crop(datapoint.images[index].data, *region) + + i, j, h, w = region + + # should we do something wrt the original size? 
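The transforms in basic.py above all follow the same `(image, target)` calling convention, so a typical training pipeline can be assembled with `Compose`. A hedged sketch, assuming the classes are imported from `sam3.train.transforms.basic`; the image size, box values, and normalization constants are illustrative:

import torch
from PIL import Image

# Toy input: a blank 640x480 image with one absolute-XYXY box.
img = Image.new("RGB", (640, 480))
target = {
    "boxes": torch.tensor([[100.0, 50.0, 300.0, 200.0]]),
    "labels": torch.tensor([1]),
    "area": torch.tensor([(300.0 - 100.0) * (200.0 - 50.0)]),
    "iscrowd": torch.tensor([0]),
}

pipeline = Compose([
    RandomHorizontalFlip(p=0.5),
    RandomResize(get_random_resize_scales(1024, 480, rounded=False),
                 max_size=get_random_resize_max_size(1024)),
    ToTensor(),                            # PIL image -> float CHW tensor
    Normalize(mean=[0.485, 0.456, 0.406],  # also converts boxes to relative CxCyWH
              std=[0.229, 0.224, 0.225]),
])
img_t, target_t = pipeline(img, target)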
+ datapoint.images[index].size = (h, w) + + for obj in datapoint.images[index].objects: + # crop the mask + if obj.segment is not None: + obj.segment = F.crop(obj.segment, int(i), int(j), int(h), int(w)) + + # crop the bounding box + if recompute_box_from_mask and obj.segment is not None: + # here the boxes are still in XYXY format with absolute coordinates (they are + # converted to CxCyWH with relative coordinates in basic_for_api.NormalizeAPI) + obj.bbox, obj.area = get_bbox_xyxy_abs_coords_from_mask(obj.segment) + else: + if recompute_box_from_mask and obj.segment is None and obj.area > 0: + logging.warning( + "Cannot recompute bounding box from mask since `obj.segment` is None. " + "Falling back to directly cropping from the input bounding box." + ) + boxes = obj.bbox.view(1, 4) + max_size = torch.as_tensor([w, h], dtype=torch.float32) + cropped_boxes = boxes - torch.as_tensor([j, i, j, i], dtype=torch.float32) + cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size) + cropped_boxes = cropped_boxes.clamp(min=0) + obj.area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1) + obj.bbox = cropped_boxes.reshape(-1, 4) + + for query in datapoint.find_queries: + if query.semantic_target is not None: + query.semantic_target = F.crop( + query.semantic_target, int(i), int(j), int(h), int(w) + ) + if query.image_id == index and query.input_bbox is not None: + boxes = query.input_bbox + max_size = torch.as_tensor([w, h], dtype=torch.float32) + cropped_boxes = boxes - torch.as_tensor([j, i, j, i], dtype=torch.float32) + cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size) + cropped_boxes = cropped_boxes.clamp(min=0) + + # cur_area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1) + # if check_input_validity: + # assert ( + # (cur_area > 0).all().item() + # ), "Some input box got cropped out by the crop transform" + + query.input_bbox = cropped_boxes.reshape(-1, 4) + if query.image_id == index and query.input_points is not None: + print( + "Warning! 
Point cropping with this function may lead to unexpected results" + ) + points = query.input_points + # Unlike right-lower box edges, which are exclusive, the + # point must be in [0, length-1], hence the -1 + max_size = torch.as_tensor([w, h], dtype=torch.float32) - 1 + cropped_points = points - torch.as_tensor([j, i, 0], dtype=torch.float32) + cropped_points[:, :, :2] = torch.min(cropped_points[:, :, :2], max_size) + cropped_points[:, :, :2] = cropped_points[:, :, :2].clamp(min=0) + query.input_points = cropped_points + + if check_validity: + # Check that all boxes are still valid + for obj in datapoint.images[index].objects: + assert obj.area > 0, "Box {} has no area".format(obj.bbox) + + return datapoint + + +def hflip(datapoint, index): + datapoint.images[index].data = F.hflip(datapoint.images[index].data) + + w, h = datapoint.images[index].data.size + for obj in datapoint.images[index].objects: + boxes = obj.bbox.view(1, 4) + boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor( + [-1, 1, -1, 1] + ) + torch.as_tensor([w, 0, w, 0]) + obj.bbox = boxes + if obj.segment is not None: + obj.segment = F.hflip(obj.segment) + + for query in datapoint.find_queries: + if query.semantic_target is not None: + query.semantic_target = F.hflip(query.semantic_target) + if query.image_id == index and query.input_bbox is not None: + boxes = query.input_bbox + boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor( + [-1, 1, -1, 1] + ) + torch.as_tensor([w, 0, w, 0]) + query.input_bbox = boxes + if query.image_id == index and query.input_points is not None: + points = query.input_points + points = points * torch.as_tensor([-1, 1, 1]) + torch.as_tensor([w, 0, 0]) + query.input_points = points + return datapoint + + +def get_size_with_aspect_ratio(image_size, size, max_size=None): + w, h = image_size + if max_size is not None: + min_original_size = float(min((w, h))) + max_original_size = float(max((w, h))) + if max_original_size / min_original_size * size > max_size: + size = max_size * min_original_size / max_original_size + + if (w <= h and w == size) or (h <= w and h == size): + return (h, w) + + if w < h: + ow = int(round(size)) + oh = int(round(size * h / w)) + else: + oh = int(round(size)) + ow = int(round(size * w / h)) + + return (oh, ow) + + +def resize(datapoint, index, size, max_size=None, square=False, v2=False): + # size can be min_size (scalar) or (w, h) tuple + + def get_size(image_size, size, max_size=None): + if isinstance(size, (list, tuple)): + return size[::-1] + else: + return get_size_with_aspect_ratio(image_size, size, max_size) + + if square: + size = size, size + else: + cur_size = ( + datapoint.images[index].data.size()[-2:][::-1] + if v2 + else datapoint.images[index].data.size + ) + size = get_size(cur_size, size, max_size) + + old_size = ( + datapoint.images[index].data.size()[-2:][::-1] + if v2 + else datapoint.images[index].data.size + ) + if v2: + datapoint.images[index].data = Fv2.resize( + datapoint.images[index].data, size, antialias=True + ) + else: + datapoint.images[index].data = F.resize(datapoint.images[index].data, size) + + new_size = ( + datapoint.images[index].data.size()[-2:][::-1] + if v2 + else datapoint.images[index].data.size + ) + ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(new_size, old_size)) + ratio_width, ratio_height = ratios + + for obj in datapoint.images[index].objects: + boxes = obj.bbox.view(1, 4) + scaled_boxes = boxes * torch.as_tensor( + [ratio_width, ratio_height, ratio_width, ratio_height], dtype=torch.float32 + ) + obj.bbox = 
scaled_boxes + obj.area *= ratio_width * ratio_height + if obj.segment is not None: + obj.segment = F.resize(obj.segment[None, None], size).squeeze() + + for query in datapoint.find_queries: + if query.semantic_target is not None: + query.semantic_target = F.resize( + query.semantic_target[None, None], size + ).squeeze() + if query.image_id == index and query.input_bbox is not None: + boxes = query.input_bbox + scaled_boxes = boxes * torch.as_tensor( + [ratio_width, ratio_height, ratio_width, ratio_height], + dtype=torch.float32, + ) + query.input_bbox = scaled_boxes + if query.image_id == index and query.input_points is not None: + points = query.input_points + scaled_points = points * torch.as_tensor( + [ratio_width, ratio_height, 1], + dtype=torch.float32, + ) + query.input_points = scaled_points + + h, w = size + datapoint.images[index].size = (h, w) + return datapoint + + +def pad(datapoint, index, padding, v2=False): + old_h, old_w = datapoint.images[index].size + h, w = old_h, old_w + if len(padding) == 2: + # assumes that we only pad on the bottom right corners + if v2: + datapoint.images[index].data = Fv2.pad( + datapoint.images[index].data, (0, 0, padding[0], padding[1]) + ) + else: + datapoint.images[index].data = F.pad( + datapoint.images[index].data, (0, 0, padding[0], padding[1]) + ) + h += padding[1] + w += padding[0] + else: + if v2: + # left, top, right, bottom + datapoint.images[index].data = Fv2.pad( + datapoint.images[index].data, + (padding[0], padding[1], padding[2], padding[3]), + ) + else: + # left, top, right, bottom + datapoint.images[index].data = F.pad( + datapoint.images[index].data, + (padding[0], padding[1], padding[2], padding[3]), + ) + h += padding[1] + padding[3] + w += padding[0] + padding[2] + + datapoint.images[index].size = (h, w) + + for obj in datapoint.images[index].objects: + if len(padding) != 2: + obj.bbox += torch.as_tensor( + [padding[0], padding[1], padding[0], padding[1]], dtype=torch.float32 + ) + if obj.segment is not None: + if v2: + if len(padding) == 2: + obj.segment = Fv2.pad( + obj.segment[None], (0, 0, padding[0], padding[1]) + ).squeeze(0) + else: + obj.segment = Fv2.pad(obj.segment[None], tuple(padding)).squeeze(0) + else: + if len(padding) == 2: + obj.segment = F.pad(obj.segment, (0, 0, padding[0], padding[1])) + else: + obj.segment = F.pad(obj.segment, tuple(padding)) + + for query in datapoint.find_queries: + if query.semantic_target is not None: + if v2: + if len(padding) == 2: + query.semantic_target = Fv2.pad( + query.semantic_target[None, None], + (0, 0, padding[0], padding[1]), + ).squeeze() + else: + query.semantic_target = Fv2.pad( + query.semantic_target[None, None], tuple(padding) + ).squeeze() + else: + if len(padding) == 2: + query.semantic_target = F.pad( + query.semantic_target[None, None], + (0, 0, padding[0], padding[1]), + ).squeeze() + else: + query.semantic_target = F.pad( + query.semantic_target[None, None], tuple(padding) + ).squeeze() + if query.image_id == index and query.input_bbox is not None: + if len(padding) != 2: + query.input_bbox += torch.as_tensor( + [padding[0], padding[1], padding[0], padding[1]], + dtype=torch.float32, + ) + if query.image_id == index and query.input_points is not None: + if len(padding) != 2: + query.input_points += torch.as_tensor( + [padding[0], padding[1], 0], dtype=torch.float32 + ) + + return datapoint + + +class RandomSizeCropAPI: + def __init__( + self, + min_size: int, + max_size: int, + respect_boxes: bool, + consistent_transform: bool, + respect_input_boxes: bool = 
True, + v2: bool = False, + recompute_box_from_mask: bool = False, + ): + self.min_size = min_size + self.max_size = max_size + self.respect_boxes = respect_boxes # if True we can't crop a box out + self.respect_input_boxes = respect_input_boxes + self.consistent_transform = consistent_transform + self.v2 = v2 + self.recompute_box_from_mask = recompute_box_from_mask + + def _sample_no_respect_boxes(self, img): + w = random.randint(self.min_size, min(img.width, self.max_size)) + h = random.randint(self.min_size, min(img.height, self.max_size)) + return T.RandomCrop.get_params(img, (h, w)) + + def _sample_respect_boxes(self, img, boxes, points, min_box_size=10.0): + """ + Assure that no box or point is dropped via cropping, though portions + of boxes may be removed. + """ + if len(boxes) == 0 and len(points) == 0: + return self._sample_no_respect_boxes(img) + + if self.v2: + img_height, img_width = img.size()[-2:] + else: + img_width, img_height = img.size + + minW, minH, maxW, maxH = ( + min(img_width, self.min_size), + min(img_height, self.min_size), + min(img_width, self.max_size), + min(img_height, self.max_size), + ) + + # The crop box must extend one pixel beyond points to the bottom/right + # to assure the exclusive box contains the points. + minX = ( + torch.cat([boxes[:, 0] + min_box_size, points[:, 0] + 1], dim=0) + .max() + .item() + ) + minY = ( + torch.cat([boxes[:, 1] + min_box_size, points[:, 1] + 1], dim=0) + .max() + .item() + ) + minX = min(img_width, minX) + minY = min(img_height, minY) + maxX = torch.cat([boxes[:, 2] - min_box_size, points[:, 0]], dim=0).min().item() + maxY = torch.cat([boxes[:, 3] - min_box_size, points[:, 1]], dim=0).min().item() + maxX = max(0.0, maxX) + maxY = max(0.0, maxY) + minW = max(minW, minX - maxX) + minH = max(minH, minY - maxY) + w = random.uniform(minW, max(minW, maxW)) + h = random.uniform(minH, max(minH, maxH)) + if minX > maxX: + # i = random.uniform(max(0, minX - w + 1), max(maxX, max(0, minX - w + 1))) + i = random.uniform(max(0, minX - w), max(maxX, max(0, minX - w))) + else: + i = random.uniform( + max(0, minX - w + 1), max(maxX - 1, max(0, minX - w + 1)) + ) + if minY > maxY: + # j = random.uniform(max(0, minY - h + 1), max(maxY, max(0, minY - h + 1))) + j = random.uniform(max(0, minY - h), max(maxY, max(0, minY - h))) + else: + j = random.uniform( + max(0, minY - h + 1), max(maxY - 1, max(0, minY - h + 1)) + ) + + return [j, i, h, w] + + def __call__(self, datapoint, **kwargs): + if self.respect_boxes or self.respect_input_boxes: + if self.consistent_transform: + # Check that all the images are the same size + w, h = datapoint.images[0].data.size + for img in datapoint.images: + assert img.data.size == (w, h) + + all_boxes = [] + # Getting all boxes in all the images + if self.respect_boxes: + all_boxes += [ + obj.bbox.view(-1, 4) + for img in datapoint.images + for obj in img.objects + ] + # Get all the boxes in the find queries + if self.respect_input_boxes: + all_boxes += [ + q.input_bbox.view(-1, 4) + for q in datapoint.find_queries + if q.input_bbox is not None + ] + if all_boxes: + all_boxes = torch.cat(all_boxes, 0) + else: + all_boxes = torch.empty(0, 4) + + all_points = [ + q.input_points.view(-1, 3)[:, :2] + for q in datapoint.find_queries + if q.input_points is not None + ] + if all_points: + all_points = torch.cat(all_points, 0) + else: + all_points = torch.empty(0, 2) + + crop_param = self._sample_respect_boxes( + datapoint.images[0].data, all_boxes, all_points + ) + for i in range(len(datapoint.images)): + datapoint = 
crop( + datapoint, + i, + crop_param, + v2=self.v2, + check_validity=self.respect_boxes, + check_input_validity=self.respect_input_boxes, + recompute_box_from_mask=self.recompute_box_from_mask, + ) + return datapoint + else: + for i in range(len(datapoint.images)): + all_boxes = [] + # Get all boxes in the current image + if self.respect_boxes: + all_boxes += [ + obj.bbox.view(-1, 4) for obj in datapoint.images[i].objects + ] + # Get all the boxes in the find queries that correspond to this image + if self.respect_input_boxes: + all_boxes += [ + q.input_bbox.view(-1, 4) + for q in datapoint.find_queries + if q.image_id == i and q.input_bbox is not None + ] + if all_boxes: + all_boxes = torch.cat(all_boxes, 0) + else: + all_boxes = torch.empty(0, 4) + + all_points = [ + q.input_points.view(-1, 3)[:, :2] + for q in datapoint.find_queries + if q.input_points is not None + ] + if all_points: + all_points = torch.cat(all_points, 0) + else: + all_points = torch.empty(0, 2) + + crop_param = self._sample_respect_boxes( + datapoint.images[i].data, all_boxes, all_points + ) + datapoint = crop( + datapoint, + i, + crop_param, + v2=self.v2, + check_validity=self.respect_boxes, + check_input_validity=self.respect_input_boxes, + recompute_box_from_mask=self.recompute_box_from_mask, + ) + return datapoint + else: + if self.consistent_transform: + # Check that all the images are the same size + w, h = datapoint.images[0].data.size + for img in datapoint.images: + assert img.data.size == (w, h) + + crop_param = self._sample_no_respect_boxes(datapoint.images[0].data) + for i in range(len(datapoint.images)): + datapoint = crop( + datapoint, + i, + crop_param, + v2=self.v2, + check_validity=self.respect_boxes, + check_input_validity=self.respect_input_boxes, + recompute_box_from_mask=self.recompute_box_from_mask, + ) + return datapoint + else: + for i in range(len(datapoint.images)): + crop_param = self._sample_no_respect_boxes(datapoint.images[i].data) + datapoint = crop( + datapoint, + i, + crop_param, + v2=self.v2, + check_validity=self.respect_boxes, + check_input_validity=self.respect_input_boxes, + recompute_box_from_mask=self.recompute_box_from_mask, + ) + return datapoint + + +class CenterCropAPI: + def __init__(self, size, consistent_transform, recompute_box_from_mask=False): + self.size = size + self.consistent_transform = consistent_transform + self.recompute_box_from_mask = recompute_box_from_mask + + def _sample_crop(self, image_width, image_height): + crop_height, crop_width = self.size + crop_top = int(round((image_height - crop_height) / 2.0)) + crop_left = int(round((image_width - crop_width) / 2.0)) + return crop_top, crop_left, crop_height, crop_width + + def __call__(self, datapoint, **kwargs): + if self.consistent_transform: + # Check that all the images are the same size + w, h = datapoint.images[0].data.size + for img in datapoint.images: + assert img.size == (w, h) + + crop_top, crop_left, crop_height, crop_width = self._sample_crop(w, h) + for i in range(len(datapoint.images)): + datapoint = crop( + datapoint, + i, + (crop_top, crop_left, crop_height, crop_width), + recompute_box_from_mask=self.recompute_box_from_mask, + ) + return datapoint + + for i in range(len(datapoint.images)): + w, h = datapoint.images[i].data.size + crop_top, crop_left, crop_height, crop_width = self._sample_crop(w, h) + datapoint = crop( + datapoint, + i, + (crop_top, crop_left, crop_height, crop_width), + recompute_box_from_mask=self.recompute_box_from_mask, + ) + + return datapoint + + +class 
RandomHorizontalFlip: + def __init__(self, consistent_transform, p=0.5): + self.p = p + self.consistent_transform = consistent_transform + + def __call__(self, datapoint, **kwargs): + if self.consistent_transform: + if random.random() < self.p: + for i in range(len(datapoint.images)): + datapoint = hflip(datapoint, i) + return datapoint + for i in range(len(datapoint.images)): + if random.random() < self.p: + datapoint = hflip(datapoint, i) + return datapoint + + +class RandomResizeAPI: + def __init__( + self, sizes, consistent_transform, max_size=None, square=False, v2=False + ): + if isinstance(sizes, int): + sizes = (sizes,) + assert isinstance(sizes, Iterable) + self.sizes = list(sizes) + self.max_size = max_size + self.square = square + self.consistent_transform = consistent_transform + self.v2 = v2 + + def __call__(self, datapoint, **kwargs): + if self.consistent_transform: + size = random.choice(self.sizes) + for i in range(len(datapoint.images)): + datapoint = resize( + datapoint, i, size, self.max_size, square=self.square, v2=self.v2 + ) + return datapoint + for i in range(len(datapoint.images)): + size = random.choice(self.sizes) + datapoint = resize( + datapoint, i, size, self.max_size, square=self.square, v2=self.v2 + ) + return datapoint + + +class ScheduledRandomResizeAPI(RandomResizeAPI): + def __init__(self, size_scheduler, consistent_transform, square=False): + self.size_scheduler = size_scheduler + # Just a meaningful init value for super + params = self.size_scheduler(epoch_num=0) + sizes, max_size = params["sizes"], params["max_size"] + super().__init__(sizes, consistent_transform, max_size=max_size, square=square) + + def __call__(self, datapoint, **kwargs): + assert "epoch" in kwargs, "Param scheduler needs to know the current epoch" + params = self.size_scheduler(kwargs["epoch"]) + sizes, max_size = params["sizes"], params["max_size"] + self.sizes = sizes + self.max_size = max_size + datapoint = super(ScheduledRandomResizeAPI, self).__call__(datapoint, **kwargs) + return datapoint + + +class RandomPadAPI: + def __init__(self, max_pad, consistent_transform): + self.max_pad = max_pad + self.consistent_transform = consistent_transform + + def _sample_pad(self): + pad_x = random.randint(0, self.max_pad) + pad_y = random.randint(0, self.max_pad) + return pad_x, pad_y + + def __call__(self, datapoint, **kwargs): + if self.consistent_transform: + pad_x, pad_y = self._sample_pad() + for i in range(len(datapoint.images)): + datapoint = pad(datapoint, i, (pad_x, pad_y)) + return datapoint + + for i in range(len(datapoint.images)): + pad_x, pad_y = self._sample_pad() + datapoint = pad(datapoint, i, (pad_x, pad_y)) + return datapoint + + +class PadToSizeAPI: + def __init__(self, size, consistent_transform, bottom_right=False, v2=False): + self.size = size + self.consistent_transform = consistent_transform + self.v2 = v2 + self.bottom_right = bottom_right + + def _sample_pad(self, w, h): + pad_x = self.size - w + pad_y = self.size - h + assert pad_x >= 0 and pad_y >= 0 + pad_left = random.randint(0, pad_x) + pad_right = pad_x - pad_left + pad_top = random.randint(0, pad_y) + pad_bottom = pad_y - pad_top + return pad_left, pad_top, pad_right, pad_bottom + + def __call__(self, datapoint, **kwargs): + if self.consistent_transform: + # Check that all the images are the same size + w, h = datapoint.images[0].data.size + for img in datapoint.images: + assert img.size == (w, h) + if self.bottom_right: + pad_right = self.size - w + pad_bottom = self.size - h + padding = (pad_right, 
pad_bottom) + else: + padding = self._sample_pad(w, h) + for i in range(len(datapoint.images)): + datapoint = pad(datapoint, i, padding, v2=self.v2) + return datapoint + + for i, img in enumerate(datapoint.images): + w, h = img.data.size + if self.bottom_right: + pad_right = self.size - w + pad_bottom = self.size - h + padding = (pad_right, pad_bottom) + else: + padding = self._sample_pad(w, h) + datapoint = pad(datapoint, i, padding, v2=self.v2) + return datapoint + + +class RandomMosaicVideoAPI: + def __init__(self, prob=0.15, grid_h=2, grid_w=2, use_random_hflip=False): + self.prob = prob + self.grid_h = grid_h + self.grid_w = grid_w + self.use_random_hflip = use_random_hflip + + def __call__(self, datapoint, **kwargs): + if random.random() > self.prob: + return datapoint + + # select a random location to place the target mask in the mosaic + target_grid_y = random.randint(0, self.grid_h - 1) + target_grid_x = random.randint(0, self.grid_w - 1) + # whether to flip each grid in the mosaic horizontally + if self.use_random_hflip: + should_hflip = torch.rand(self.grid_h, self.grid_w) < 0.5 + else: + should_hflip = torch.zeros(self.grid_h, self.grid_w, dtype=torch.bool) + for i in range(len(datapoint.images)): + datapoint = random_mosaic_frame( + datapoint, + i, + grid_h=self.grid_h, + grid_w=self.grid_w, + target_grid_y=target_grid_y, + target_grid_x=target_grid_x, + should_hflip=should_hflip, + ) + + return datapoint + + +def random_mosaic_frame( + datapoint, + index, + grid_h, + grid_w, + target_grid_y, + target_grid_x, + should_hflip, +): + # Step 1: downsize the images and paste them into a mosaic + image_data = datapoint.images[index].data + is_pil = isinstance(image_data, PILImage.Image) + if is_pil: + H_im = image_data.height + W_im = image_data.width + image_data_output = PILImage.new("RGB", (W_im, H_im)) + else: + H_im = image_data.size(-2) + W_im = image_data.size(-1) + image_data_output = torch.zeros_like(image_data) + + downsize_cache = {} + for grid_y in range(grid_h): + for grid_x in range(grid_w): + y_offset_b = grid_y * H_im // grid_h + x_offset_b = grid_x * W_im // grid_w + y_offset_e = (grid_y + 1) * H_im // grid_h + x_offset_e = (grid_x + 1) * W_im // grid_w + H_im_downsize = y_offset_e - y_offset_b + W_im_downsize = x_offset_e - x_offset_b + + if (H_im_downsize, W_im_downsize) in downsize_cache: + image_data_downsize = downsize_cache[(H_im_downsize, W_im_downsize)] + else: + image_data_downsize = F.resize( + image_data, + size=(H_im_downsize, W_im_downsize), + interpolation=InterpolationMode.BILINEAR, + antialias=True, # antialiasing for downsizing + ) + downsize_cache[(H_im_downsize, W_im_downsize)] = image_data_downsize + if should_hflip[grid_y, grid_x].item(): + image_data_downsize = F.hflip(image_data_downsize) + + if is_pil: + image_data_output.paste(image_data_downsize, (x_offset_b, y_offset_b)) + else: + image_data_output[:, y_offset_b:y_offset_e, x_offset_b:x_offset_e] = ( + image_data_downsize + ) + + datapoint.images[index].data = image_data_output + + # Step 2: downsize the masks and paste them into the target grid of the mosaic + # (note that we don't scale input/target boxes since they are not used in TA) + for obj in datapoint.images[index].objects: + if obj.segment is None: + continue + assert obj.segment.shape == (H_im, W_im) and obj.segment.dtype == torch.uint8 + segment_output = torch.zeros_like(obj.segment) + + target_y_offset_b = target_grid_y * H_im // grid_h + target_x_offset_b = target_grid_x * W_im // grid_w + target_y_offset_e = (target_grid_y + 
1) * H_im // grid_h + target_x_offset_e = (target_grid_x + 1) * W_im // grid_w + target_H_im_downsize = target_y_offset_e - target_y_offset_b + target_W_im_downsize = target_x_offset_e - target_x_offset_b + + segment_downsize = F.resize( + obj.segment[None, None], + size=(target_H_im_downsize, target_W_im_downsize), + interpolation=InterpolationMode.BILINEAR, + antialias=True, # antialiasing for downsizing + )[0, 0] + if should_hflip[target_grid_y, target_grid_x].item(): + segment_downsize = F.hflip(segment_downsize[None, None])[0, 0] + + segment_output[ + target_y_offset_b:target_y_offset_e, target_x_offset_b:target_x_offset_e + ] = segment_downsize + obj.segment = segment_output + + return datapoint + + +class ScheduledPadToSizeAPI(PadToSizeAPI): + def __init__(self, size_scheduler, consistent_transform): + self.size_scheduler = size_scheduler + size = self.size_scheduler(epoch_num=0)["sizes"] + super().__init__(size, consistent_transform) + + def __call__(self, datapoint, **kwargs): + assert "epoch" in kwargs, "Param scheduler needs to know the current epoch" + params = self.size_scheduler(kwargs["epoch"]) + self.size = params["resolution"] + return super(ScheduledPadToSizeAPI, self).__call__(datapoint, **kwargs) + + +class IdentityAPI: + def __call__(self, datapoint, **kwargs): + return datapoint + + +class RandomSelectAPI: + """ + Randomly selects between transforms1 and transforms2, + with probability p for transforms1 and (1 - p) for transforms2 + """ + + def __init__(self, transforms1=None, transforms2=None, p=0.5): + self.transforms1 = transforms1 or IdentityAPI() + self.transforms2 = transforms2 or IdentityAPI() + self.p = p + + def __call__(self, datapoint, **kwargs): + if random.random() < self.p: + return self.transforms1(datapoint, **kwargs) + return self.transforms2(datapoint, **kwargs) + + +class ToTensorAPI: + def __init__(self, v2=False): + self.v2 = v2 + + def __call__(self, datapoint: Datapoint, **kwargs): + for img in datapoint.images: + if self.v2: + img.data = Fv2.to_image_tensor(img.data) + # img.data = Fv2.to_dtype(img.data, torch.uint8, scale=True) + # img.data = Fv2.convert_image_dtype(img.data, torch.uint8) + else: + img.data = F.to_tensor(img.data) + return datapoint + + +class NormalizeAPI: + def __init__(self, mean, std, v2=False): + self.mean = mean + self.std = std + self.v2 = v2 + + def __call__(self, datapoint: Datapoint, **kwargs): + for img in datapoint.images: + if self.v2: + img.data = Fv2.convert_image_dtype(img.data, torch.float32) + img.data = Fv2.normalize(img.data, mean=self.mean, std=self.std) + else: + img.data = F.normalize(img.data, mean=self.mean, std=self.std) + for obj in img.objects: + boxes = obj.bbox + cur_h, cur_w = img.data.shape[-2:] + boxes = box_xyxy_to_cxcywh(boxes) + boxes = boxes / torch.tensor( + [cur_w, cur_h, cur_w, cur_h], dtype=torch.float32 + ) + obj.bbox = boxes + + for query in datapoint.find_queries: + if query.input_bbox is not None: + boxes = query.input_bbox + cur_h, cur_w = datapoint.images[query.image_id].data.shape[-2:] + boxes = box_xyxy_to_cxcywh(boxes) + boxes = boxes / torch.tensor( + [cur_w, cur_h, cur_w, cur_h], dtype=torch.float32 + ) + query.input_bbox = boxes + if query.input_points is not None: + points = query.input_points + cur_h, cur_w = datapoint.images[query.image_id].data.shape[-2:] + points = points / torch.tensor([cur_w, cur_h, 1.0], dtype=torch.float32) + query.input_points = points + + return datapoint + + +class ComposeAPI: + def __init__(self, transforms): + self.transforms = transforms + + 
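For orientation, a minimal wiring sketch of how these `*API` transforms could be chained through `ComposeAPI`; the import path is inferred from this file's location and the sizes/normalization statistics are illustrative assumptions, not values taken from the repo's configs:

```python
# Assumes the sam3 package is importable and `datapoint` is a sam3 Datapoint holding PIL images.
from sam3.train.transforms.basic_for_api import (
    ComposeAPI,
    NormalizeAPI,
    RandomHorizontalFlip,
    RandomResizeAPI,
    ToTensorAPI,
)

train_transforms = ComposeAPI(
    transforms=[
        RandomHorizontalFlip(consistent_transform=True, p=0.5),
        RandomResizeAPI(sizes=[640, 704, 768], consistent_transform=True, max_size=1024),
        ToTensorAPI(),
        NormalizeAPI(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]
)

# datapoint = train_transforms(datapoint, epoch=0)
# ComposeAPI forwards extra kwargs, which is how the Scheduled* variants receive the epoch.
```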
def __call__(self, datapoint, **kwargs): + for t in self.transforms: + datapoint = t(datapoint, **kwargs) + return datapoint + + def __repr__(self): + format_string = self.__class__.__name__ + "(" + for t in self.transforms: + format_string += "\n" + format_string += " {0}".format(t) + format_string += "\n)" + return format_string + + +class RandomGrayscale: + def __init__(self, consistent_transform, p=0.5): + self.p = p + self.consistent_transform = consistent_transform + self.Grayscale = T.Grayscale(num_output_channels=3) + + def __call__(self, datapoint: Datapoint, **kwargs): + if self.consistent_transform: + if random.random() < self.p: + for img in datapoint.images: + img.data = self.Grayscale(img.data) + return datapoint + for img in datapoint.images: + if random.random() < self.p: + img.data = self.Grayscale(img.data) + return datapoint + + +class ColorJitter: + def __init__(self, consistent_transform, brightness, contrast, saturation, hue): + self.consistent_transform = consistent_transform + self.brightness = ( + brightness + if isinstance(brightness, list) + else [max(0, 1 - brightness), 1 + brightness] + ) + self.contrast = ( + contrast + if isinstance(contrast, list) + else [max(0, 1 - contrast), 1 + contrast] + ) + self.saturation = ( + saturation + if isinstance(saturation, list) + else [max(0, 1 - saturation), 1 + saturation] + ) + self.hue = hue if isinstance(hue, list) or hue is None else ([-hue, hue]) + + def __call__(self, datapoint: Datapoint, **kwargs): + if self.consistent_transform: + # Create a color jitter transformation params + ( + fn_idx, + brightness_factor, + contrast_factor, + saturation_factor, + hue_factor, + ) = T.ColorJitter.get_params( + self.brightness, self.contrast, self.saturation, self.hue + ) + for img in datapoint.images: + if not self.consistent_transform: + ( + fn_idx, + brightness_factor, + contrast_factor, + saturation_factor, + hue_factor, + ) = T.ColorJitter.get_params( + self.brightness, self.contrast, self.saturation, self.hue + ) + for fn_id in fn_idx: + if fn_id == 0 and brightness_factor is not None: + img.data = F.adjust_brightness(img.data, brightness_factor) + elif fn_id == 1 and contrast_factor is not None: + img.data = F.adjust_contrast(img.data, contrast_factor) + elif fn_id == 2 and saturation_factor is not None: + img.data = F.adjust_saturation(img.data, saturation_factor) + elif fn_id == 3 and hue_factor is not None: + img.data = F.adjust_hue(img.data, hue_factor) + return datapoint + + +class RandomAffine: + def __init__( + self, + degrees, + consistent_transform, + scale=None, + translate=None, + shear=None, + image_mean=(123, 116, 103), + log_warning=True, + num_tentatives=1, + image_interpolation="bicubic", + ): + """ + The mask is required for this transform. + if consistent_transform if True, then the same random affine is applied to all frames and masks. 
+ """ + self.degrees = degrees if isinstance(degrees, list) else ([-degrees, degrees]) + self.scale = scale + self.shear = ( + shear if isinstance(shear, list) else ([-shear, shear] if shear else None) + ) + self.translate = translate + self.fill_img = image_mean + self.consistent_transform = consistent_transform + self.log_warning = log_warning + self.num_tentatives = num_tentatives + + if image_interpolation == "bicubic": + self.image_interpolation = InterpolationMode.BICUBIC + elif image_interpolation == "bilinear": + self.image_interpolation = InterpolationMode.BILINEAR + else: + raise NotImplementedError + + def __call__(self, datapoint: Datapoint, **kwargs): + for _tentative in range(self.num_tentatives): + res = self.transform_datapoint(datapoint) + if res is not None: + return res + + if self.log_warning: + logging.warning( + f"Skip RandomAffine for zero-area mask in first frame after {self.num_tentatives} tentatives" + ) + return datapoint + + def transform_datapoint(self, datapoint: Datapoint): + _, height, width = F.get_dimensions(datapoint.images[0].data) + img_size = [width, height] + + if self.consistent_transform: + # Create a random affine transformation + affine_params = T.RandomAffine.get_params( + degrees=self.degrees, + translate=self.translate, + scale_ranges=self.scale, + shears=self.shear, + img_size=img_size, + ) + + for img_idx, img in enumerate(datapoint.images): + this_masks = [ + obj.segment.unsqueeze(0) if obj.segment is not None else None + for obj in img.objects + ] + if not self.consistent_transform: + # if not consistent we create a new affine params for every frame&mask pair Create a random affine transformation + affine_params = T.RandomAffine.get_params( + degrees=self.degrees, + translate=self.translate, + scale_ranges=self.scale, + shears=self.shear, + img_size=img_size, + ) + + transformed_bboxes, transformed_masks = [], [] + for i in range(len(img.objects)): + if this_masks[i] is None: + transformed_masks.append(None) + # Dummy bbox for a dummy target + transformed_bboxes.append(torch.tensor([[0, 0, 0, 0]])) + else: + transformed_mask = F.affine( + this_masks[i], + *affine_params, + interpolation=InterpolationMode.NEAREST, + fill=0.0, + ) + if img_idx == 0 and transformed_mask.max() == 0: + # We are dealing with a video and the object is not visible in the first frame + # Return the datapoint without transformation + return None + transformed_bbox = masks_to_boxes(transformed_mask) + transformed_bboxes.append(transformed_bbox) + transformed_masks.append(transformed_mask.squeeze()) + + for i in range(len(img.objects)): + img.objects[i].bbox = transformed_bboxes[i] + img.objects[i].segment = transformed_masks[i] + + img.data = F.affine( + img.data, + *affine_params, + interpolation=self.image_interpolation, + fill=self.fill_img, + ) + return datapoint + + +class RandomResizedCrop: + def __init__( + self, + consistent_transform, + size, + scale=None, + ratio=None, + log_warning=True, + num_tentatives=4, + keep_aspect_ratio=False, + ): + """ + The mask is required for this transform. + if consistent_transform if True, then the same random resized crop is applied to all frames and masks. 
+ """ + if isinstance(size, numbers.Number): + self.size = (int(size), int(size)) + elif isinstance(size, Sequence) and len(size) == 1: + self.size = (size[0], size[0]) + elif len(size) != 2: + raise ValueError("Please provide only two dimensions (h, w) for size.") + else: + self.size = size + + self.scale = scale if scale is not None else (0.08, 1.0) + self.ratio = ratio if ratio is not None else (3.0 / 4.0, 4.0 / 3.0) + self.consistent_transform = consistent_transform + self.log_warning = log_warning + self.num_tentatives = num_tentatives + self.keep_aspect_ratio = keep_aspect_ratio + + def __call__(self, datapoint: Datapoint, **kwargs): + for _tentative in range(self.num_tentatives): + res = self.transform_datapoint(datapoint) + if res is not None: + return res + + if self.log_warning: + logging.warning( + f"Skip RandomResizeCrop for zero-area mask in first frame after {self.num_tentatives} tentatives" + ) + return datapoint + + def transform_datapoint(self, datapoint: Datapoint): + if self.keep_aspect_ratio: + original_size = datapoint.images[0].size + original_ratio = original_size[1] / original_size[0] + ratio = [r * original_ratio for r in self.ratio] + else: + ratio = self.ratio + + if self.consistent_transform: + # Create a random crop transformation + crop_params = T.RandomResizedCrop.get_params( + img=datapoint.images[0].data, + scale=self.scale, + ratio=ratio, + ) + + for img_idx, img in enumerate(datapoint.images): + if not self.consistent_transform: + # Create a random crop transformation + crop_params = T.RandomResizedCrop.get_params( + img=img.data, + scale=self.scale, + ratio=ratio, + ) + + this_masks = [ + obj.segment.unsqueeze(0) if obj.segment is not None else None + for obj in img.objects + ] + + transformed_bboxes, transformed_masks = [], [] + for i in range(len(img.objects)): + if this_masks[i] is None: + transformed_masks.append(None) + # Dummy bbox for a dummy target + transformed_bboxes.append(torch.tensor([[0, 0, 0, 0]])) + else: + transformed_mask = F.resized_crop( + this_masks[i], + *crop_params, + size=self.size, + interpolation=InterpolationMode.NEAREST, + ) + if img_idx == 0 and transformed_mask.max() == 0: + # We are dealing with a video and the object is not visible in the first frame + # Return the datapoint without transformation + return None + transformed_masks.append(transformed_mask.squeeze()) + transformed_bbox = masks_to_boxes(transformed_mask) + transformed_bboxes.append(transformed_bbox) + + # Set the new boxes and masks if all transformed masks and boxes are good. 
+ for i in range(len(img.objects)): + img.objects[i].bbox = transformed_bboxes[i] + img.objects[i].segment = transformed_masks[i] + + img.data = F.resized_crop( + img.data, + *crop_params, + size=self.size, + interpolation=InterpolationMode.BILINEAR, + ) + return datapoint + + +class ResizeToMaxIfAbove: + # Resize datapoint image if one of its sides is larger that max_size + def __init__( + self, + max_size=None, + ): + self.max_size = max_size + + def __call__(self, datapoint: Datapoint, **kwargs): + _, height, width = F.get_dimensions(datapoint.images[0].data) + + if height <= self.max_size and width <= self.max_size: + # The original frames are small enough + return datapoint + elif height >= width: + new_height = self.max_size + new_width = int(round(self.max_size * width / height)) + else: + new_height = int(round(self.max_size * height / width)) + new_width = self.max_size + + size = new_height, new_width + + for index in range(len(datapoint.images)): + datapoint.images[index].data = F.resize(datapoint.images[index].data, size) + + for obj in datapoint.images[index].objects: + obj.segment = F.resize( + obj.segment[None, None], + size, + interpolation=InterpolationMode.NEAREST, + ).squeeze() + + h, w = size + datapoint.images[index].size = (h, w) + return datapoint + + +def get_bbox_xyxy_abs_coords_from_mask(mask): + """Get the bounding box (XYXY format w/ absolute coordinates) of a binary mask.""" + assert mask.dim() == 2 + rows = torch.any(mask, dim=1) + cols = torch.any(mask, dim=0) + row_inds = rows.nonzero().view(-1) + col_inds = cols.nonzero().view(-1) + if row_inds.numel() == 0: + # mask is empty + bbox = torch.zeros(1, 4, dtype=torch.float32) + bbox_area = 0.0 + else: + ymin, ymax = row_inds.min(), row_inds.max() + xmin, xmax = col_inds.min(), col_inds.max() + bbox = torch.tensor([xmin, ymin, xmax, ymax], dtype=torch.float32).view(1, 4) + bbox_area = float((ymax - ymin) * (xmax - xmin)) + return bbox, bbox_area + + +class MotionBlur: + def __init__(self, kernel_size=5, consistent_transform=True, p=0.5): + assert kernel_size % 2 == 1, "Kernel size must be odd." 
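A quick standalone check (toy mask) of the row/column reduction behind `get_bbox_xyxy_abs_coords_from_mask` above; note the returned area is `(ymax - ymin) * (xmax - xmin)`, so a one-pixel mask yields area 0:

```python
import torch

# Toy 6x8 mask with foreground in rows 2..4 and columns 3..6.
mask = torch.zeros(6, 8, dtype=torch.uint8)
mask[2:5, 3:7] = 1

rows = torch.any(mask, dim=1).nonzero().view(-1)
cols = torch.any(mask, dim=0).nonzero().view(-1)
ymin, ymax = rows.min().item(), rows.max().item()
xmin, xmax = cols.min().item(), cols.max().item()

print([xmin, ymin, xmax, ymax])       # [3, 2, 6, 4]
print((ymax - ymin) * (xmax - xmin))  # 6
```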
+ self.kernel_size = kernel_size + self.consistent_transform = consistent_transform + self.p = p + + def __call__(self, datapoint: Datapoint, **kwargs): + if random.random() >= self.p: + return datapoint + if self.consistent_transform: + # Generate a single motion blur kernel for all images + kernel = self._generate_motion_blur_kernel() + for img in datapoint.images: + if not self.consistent_transform: + # Generate a new motion blur kernel for each image + kernel = self._generate_motion_blur_kernel() + img.data = self._apply_motion_blur(img.data, kernel) + + return datapoint + + def _generate_motion_blur_kernel(self): + kernel = torch.zeros((self.kernel_size, self.kernel_size)) + direction = random.choice(["horizontal", "vertical", "diagonal"]) + if direction == "horizontal": + kernel[self.kernel_size // 2, :] = 1.0 + elif direction == "vertical": + kernel[:, self.kernel_size // 2] = 1.0 + elif direction == "diagonal": + for i in range(self.kernel_size): + kernel[i, i] = 1.0 + kernel /= kernel.sum() + return kernel + + def _apply_motion_blur(self, image, kernel): + if isinstance(image, PILImage.Image): + image = F.to_tensor(image) + channels = image.shape[0] + kernel = kernel.to(image.device).unsqueeze(0).unsqueeze(0) + blurred_image = torch.nn.functional.conv2d( + image.unsqueeze(0), + kernel.repeat(channels, 1, 1, 1), + padding=self.kernel_size // 2, + groups=channels, + ) + return F.to_pil_image(blurred_image.squeeze(0)) + + +class LargeScaleJitter: + def __init__( + self, + scale_range=(0.1, 2.0), + aspect_ratio_range=(0.75, 1.33), + crop_size=(640, 640), + consistent_transform=True, + p=0.5, + ): + """ + Args:rack + scale_range (tuple): Range of scaling factors (min_scale, max_scale). + aspect_ratio_range (tuple): Range of aspect ratios (min_aspect_ratio, max_aspect_ratio). + crop_size (tuple): Target size of the cropped region (width, height). + consistent_transform (bool): Whether to apply the same transformation across all frames. + p (float): Probability of applying the transformation. 
+ """ + self.scale_range = scale_range + self.aspect_ratio_range = aspect_ratio_range + self.crop_size = crop_size + self.consistent_transform = consistent_transform + self.p = p + + def __call__(self, datapoint: Datapoint, **kwargs): + if random.random() >= self.p: + return datapoint + + # Sample a single scale factor and aspect ratio for all frames + log_ratio = torch.log(torch.tensor(self.aspect_ratio_range)) + scale_factor = torch.empty(1).uniform_(*self.scale_range).item() + aspect_ratio = torch.exp( + torch.empty(1).uniform_(log_ratio[0], log_ratio[1]) + ).item() + + for idx, img in enumerate(datapoint.images): + if not self.consistent_transform: + # Sample a new scale factor and aspect ratio for each frame + log_ratio = torch.log(torch.tensor(self.aspect_ratio_range)) + scale_factor = torch.empty(1).uniform_(*self.scale_range).item() + aspect_ratio = torch.exp( + torch.empty(1).uniform_(log_ratio[0], log_ratio[1]) + ).item() + + # Compute the dimensions of the jittered crop + original_width, original_height = img.data.size + target_area = original_width * original_height * scale_factor + crop_width = int(round((target_area * aspect_ratio) ** 0.5)) + crop_height = int(round((target_area / aspect_ratio) ** 0.5)) + + # Randomly select the top-left corner of the crop + crop_x = random.randint(0, max(0, original_width - crop_width)) + crop_y = random.randint(0, max(0, original_height - crop_height)) + + # Extract the cropped region + datapoint = crop(datapoint, idx, (crop_x, crop_y, crop_width, crop_height)) + + # Resize the cropped region to the target crop size + datapoint = resize(datapoint, idx, self.crop_size) + + return datapoint diff --git a/detect_tools/sam3/sam3/train/transforms/filter_query_transforms.py b/detect_tools/sam3/sam3/train/transforms/filter_query_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..2d6708f453d34a1ba05de8a461c9aee5d0636f45 --- /dev/null +++ b/detect_tools/sam3/sam3/train/transforms/filter_query_transforms.py @@ -0,0 +1,607 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
All Rights Reserved + +import logging +import random + +from collections import defaultdict +from typing import List, Optional, Union + +import torch + +from sam3.train.data.sam3_image_dataset import Datapoint, FindQuery, Object + + +class FilterDataPointQueries: + find_ids_to_filter: set = None + get_ids_to_filter: set = None + obj_ids_to_filter: set = None # stored as pairs (img_id, obj_id) + + def identify_queries_to_filter(self, datapoint: Datapoint) -> None: + """ + Compute set of query ids to keep, for both find and get queries + """ + raise NotImplementedError + + def _do_filter_query(self, query: Union[FindQuery], query_id: int): + assert self.find_ids_to_filter is not None + + return query_id in self.find_ids_to_filter + + +class FilterQueryWithText(FilterDataPointQueries): + """ + Filter all datapoints which have query text in a specified list of exluded terms + """ + + def __init__( + self, exclude_find_keys: List[str] = None, exclude_get_keys: List[str] = None + ): + self.find_filter_keys = exclude_find_keys if exclude_find_keys else [] + self.get_filter_keys = exclude_get_keys if exclude_get_keys else [] + + def identify_queries_to_filter(self, datapoint): + self.obj_ids_to_filter = set() + del_find_ids = [] + del_get_ids = [] + for i, f_q in enumerate(datapoint.find_queries): + if f_q.query_text in self.find_filter_keys: + del_find_ids.append(i) + + self.find_ids_to_filter = set(del_find_ids) + + +class KeepMaxNumFindQueries(FilterDataPointQueries): + def __init__( + self, max_num_find_queries: int, retain_positive_queries: bool = False + ): + self.max_num_find_queries = max_num_find_queries + self.retain_positive_queries = retain_positive_queries + + def identify_queries_to_filter(self, datapoint: Datapoint) -> None: + self.obj_ids_to_filter = set() + num_find_queries = len(datapoint.find_queries) + if num_find_queries <= self.max_num_find_queries: + self.find_ids_to_filter = set() # keep all find queries + return + + if not self.retain_positive_queries: + all_find_query_ids = list(range(num_find_queries)) + num_queries_to_filter = max(0, num_find_queries - self.max_num_find_queries) + query_ids_to_filter = random.sample( + all_find_query_ids, k=num_queries_to_filter + ) + else: + # keep up to max_num_find_queries postive find queries and fill + # the remaining slots (if any) with negative find queries + pos_find_ids, neg_find_ids = [], [] + for i, f_q in enumerate(datapoint.find_queries): + # Negative finds return an empty list of object_ids_output + if len(f_q.object_ids_output) == 0: + neg_find_ids.append(i) + else: + pos_find_ids.append(i) + + if len(pos_find_ids) >= self.max_num_find_queries: + # we have more positive find queries than `max_num_find_queries`, + # so we subsample postive find queries and remove all negative find queries + num_queries_to_filter = len(pos_find_ids) - self.max_num_find_queries + query_ids_to_filter = random.sample( + pos_find_ids, k=num_queries_to_filter + ) + query_ids_to_filter.extend(neg_find_ids) + else: + # we have fewer positive find queries than `max_num_find_queries` + # so we need to fill the remaining with negative find queries + num_queries_to_filter = num_find_queries - self.max_num_find_queries + query_ids_to_filter = random.sample( + neg_find_ids, k=num_queries_to_filter + ) + + assert len(query_ids_to_filter) == num_find_queries - self.max_num_find_queries + self.find_ids_to_filter = set(query_ids_to_filter) + + +class KeepMaxNumFindQueriesVideo(FilterDataPointQueries): + def __init__( + self, + 
video_mosaic_max_num_find_queries_per_frame: int, + retain_positive_queries: bool = False, + ): + self.video_mosaic_max_num_find_queries_per_frame = ( + video_mosaic_max_num_find_queries_per_frame + ) + self.retain_positive_queries = retain_positive_queries + + def identify_queries_to_filter(self, datapoint: Datapoint) -> None: + self.obj_ids_to_filter = set() + num_find_queries = len(datapoint.find_queries) + + findQueries_to_imageIds = defaultdict(list) + max_queries_per_frame = True + for i, f_q in enumerate(datapoint.find_queries): + findQueries_to_imageIds[f_q.image_id].append(i) + if ( + len(findQueries_to_imageIds[f_q.image_id]) + > self.video_mosaic_max_num_find_queries_per_frame + ): + max_queries_per_frame = False + + if max_queries_per_frame: + self.find_ids_to_filter = set() + return + + num_frames = len(findQueries_to_imageIds) + findQueries_0 = findQueries_to_imageIds[0] + num_find_queries_0 = len(findQueries_0) + max_num_find_queries_per_frame = ( + self.video_mosaic_max_num_find_queries_per_frame + ) + if not self.retain_positive_queries: + find_query_ids_0 = list(range(num_find_queries_0)) + num_queries_to_filter = max( + 0, num_find_queries_0 - max_num_find_queries_per_frame + ) + query_ids_to_filter_0 = random.sample( + find_query_ids_0, k=num_queries_to_filter + ) + else: + # keep up to max_num_find_queries postive find queries and fill + # the remaining slots (if any) with negative find queries + pos_find_ids_0, neg_find_ids_0 = [], [] + for i, f_q_id in enumerate(findQueries_0): + f_q = datapoint.find_queries[f_q_id] + # Negative finds return an empty list of object_ids_output + if len(f_q.object_ids_output) == 0: + neg_find_ids_0.append(i) + else: + pos_find_ids_0.append(i) + + if len(pos_find_ids_0) >= max_num_find_queries_per_frame: + # we have more positive find queries than `max_num_find_queries`, + # so we subsample postive find queries and remove all negative find queries + num_queries_to_filter = ( + len(pos_find_ids_0) - max_num_find_queries_per_frame + ) + query_ids_to_filter_0 = random.sample( + pos_find_ids_0, k=num_queries_to_filter + ) + query_ids_to_filter_0.extend(neg_find_ids_0) + else: + # we have fewer positive find queries than `max_num_find_queries` + # so we need to fill the remaining with negative find queries + num_queries_to_filter = ( + num_find_queries_0 - max_num_find_queries_per_frame + ) + query_ids_to_filter_0 = random.sample( + neg_find_ids_0, k=num_queries_to_filter + ) + + # get based on frame 0 all find queries from all the frames with the same indices as in frame 0 + query_ids_to_filter = [] + for i in range(num_frames): + findQueries_i = findQueries_to_imageIds[i] + query_ids_to_filter.extend( + [findQueries_i[j] for j in query_ids_to_filter_0] + ) + + assert ( + len(query_ids_to_filter) + == num_find_queries + - self.video_mosaic_max_num_find_queries_per_frame * num_frames + ) + self.find_ids_to_filter = set(query_ids_to_filter) + + +class KeepSemanticFindQueriesOnly(FilterDataPointQueries): + def identify_queries_to_filter(self, datapoint: Datapoint) -> None: + self.obj_ids_to_filter = set() + self.find_ids_to_filter = { + i for i, q in enumerate(datapoint.find_queries) if q.input_bbox is not None + } # filter (remove) geometric find queries (whose input_bbox is not None) + + # Keep all get queries which don't depend on filtered finds + + +class KeepUnaryFindQueriesOnly(FilterDataPointQueries): + def identify_queries_to_filter(self, datapoint: Datapoint) -> None: + self.obj_ids_to_filter = set() + self.find_ids_to_filter = set() 
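A toy, self-contained sketch of the retain-positive subsampling policy implemented in `KeepMaxNumFindQueries` and reused per frame by `KeepMaxNumFindQueriesVideo`; the query ids and output counts below are invented:

```python
import random

random.seed(0)
max_q = 3
# (query_id, number of output objects); zero outputs marks a negative find query.
queries = [(0, 2), (1, 0), (2, 1), (3, 0), (4, 4), (5, 0)]

pos = [qid for qid, n_out in queries if n_out > 0]
neg = [qid for qid, n_out in queries if n_out == 0]

if len(pos) >= max_q:
    # Too many positives: subsample them and drop all negatives.
    drop = random.sample(pos, k=len(pos) - max_q) + neg
else:
    # Keep every positive and fill the remaining slots with negatives.
    drop = random.sample(neg, k=len(queries) - max_q)

print(sorted(drop))  # 3 query ids are dropped so that exactly max_q remain
```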
+ + # Keep all get queries which don't depend on filtered finds + + +class FilterZeroBoxQueries(FilterDataPointQueries): + """ + Filters all find queries which predict a box with zero area + """ + + @staticmethod + def _is_zero_area_object(obj: Object): + # Check if height or width of bounding box is zero + bbox = obj.bbox # Assume in XYXY format + height = bbox[..., 3].item() - bbox[..., 1].item() + width = bbox[..., 2].item() - bbox[..., 0].item() + + return height == 0 or width == 0 + + def identify_queries_to_filter(self, datapoint): + self.obj_ids_to_filter = set() + + # Find objects with zero area + # Assume only one image per datapoint + image_objects = datapoint.images[0].objects + exclude_objects = { + obj_id + for obj_id, obj in enumerate(image_objects) + if self._is_zero_area_object(obj) + } + + # If a query predicts an object with zero area, drop the whole find query + del_find_ids = [] + for i, f_q in enumerate(datapoint.find_queries): + f_q_objects = set(f_q.object_ids_output) + if len(exclude_objects.intersection(f_q_objects)) > 0: + del_find_ids.append(i) + + self.find_ids_to_filter = set(del_find_ids) + + +class FilterFindQueriesWithTooManyOut(FilterDataPointQueries): + """ + Filters all find queries which have more than a specified number of objects in the output + """ + + def __init__(self, max_num_objects: int): + self.max_num_objects = max_num_objects + + def identify_queries_to_filter(self, datapoint): + self.obj_ids_to_filter = set() + + # If a query predicts more than max_num_objects, drop the whole find query + del_find_ids = [] + for i, f_q in enumerate(datapoint.find_queries): + if len(f_q.object_ids_output) > self.max_num_objects: + del_find_ids.append(i) + + self.find_ids_to_filter = set(del_find_ids) + + +class FilterEmptyTargets(FilterDataPointQueries): + """ + Filters all targets which have zero area + """ + + def identify_queries_to_filter(self, datapoint): + self.obj_ids_to_filter = set() + + for img_id in range(len(datapoint.images)): + for obj_id, obj in enumerate(datapoint.images[img_id].objects): + if obj.area < 1e-6: + self.obj_ids_to_filter.add((img_id, obj_id)) + self.find_ids_to_filter = set() + + +class FilterNonExhaustiveFindQueries(FilterDataPointQueries): + """ + Filters all find queries which are non-exhaustive + """ + + def __init__(self, exhaustivity_type: str): + """ + Args: + exhaustivity_type: Can be "pixel" or "instance": + -pixel: filter queries where the union of all segments covers every pixel belonging to target class + -instance: filter queries where there are non-separable or non annotated instances + Note that instance exhaustivity implies pixel exhaustivity + """ + assert exhaustivity_type in ["pixel", "instance"] + self.exhaustivity_type = exhaustivity_type + + def identify_queries_to_filter(self, datapoint): + self.obj_ids_to_filter = set() + + # If a query predicts more than max_num_objects, drop the whole find query + del_find_ids = [] + for i, f_q in enumerate(datapoint.find_queries): + if self.exhaustivity_type == "instance": + if not f_q.is_exhaustive: + del_find_ids.append(i) + elif self.exhaustivity_type == "pixel": + if f_q.is_pixel_exhaustive is not None and not f_q.is_pixel_exhaustive: + del_find_ids.append(i) + else: + raise RuntimeError( + f"Unknown exhaustivity type {self.exhaustivity_type}" + ) + + self.find_ids_to_filter = set(del_find_ids) + + +class FilterInvalidGeometricQueries(FilterDataPointQueries): + """ + Filters geometric queries whose output got deleted (eg due to cropping) + """ + + def 
identify_queries_to_filter(self, datapoint): + self.obj_ids_to_filter = set() + + # If a query predicts more than max_num_objects, drop the whole find query + del_find_ids = [] + for i, f_q in enumerate(datapoint.find_queries): + if f_q.input_bbox is not None and f_q.query_text == "geometric": + if len(f_q.object_ids_output) == 0: + del_find_ids.append(i) + self.find_ids_to_filter = set(del_find_ids) + + +class FlexibleFilterFindGetQueries: + def __init__( + self, query_filter: FilterDataPointQueries, enabled: bool = True + ) -> None: + self.query_filter = query_filter + self.enabled = enabled + + def __call__(self, datapoint, **kwargs): + if not self.enabled: + return datapoint + + # Identify all queries to filter + self.query_filter.identify_queries_to_filter(datapoint=datapoint) + + del_find_ids = [] + del_get_ids = [] + for i, f_q in enumerate(datapoint.find_queries): + if self.query_filter._do_filter_query(f_q, i): + datapoint.find_queries[i] = None + del_find_ids.append(i) + + new_find_queries = [] + new_get_queries = [] + + find_old_to_new_map = {} + get_old_to_new_map = {} + + find_counter = 0 + get_counter = 0 + + for i, f_q in enumerate(datapoint.find_queries): + if f_q is not None: + find_old_to_new_map[i] = find_counter + find_counter += 1 + new_find_queries.append(f_q) + + start_with_zero_check = False + for n_f_q in new_find_queries: + if n_f_q.query_processing_order == 0: + start_with_zero_check = True + break + + if len(new_find_queries) == 0: + start_with_zero_check = True + + assert ( + start_with_zero_check + ), "Invalid Find queries, they need to start at query_processing_order = 0" + + datapoint.find_queries = new_find_queries + + if len(datapoint.find_queries) == 0: + print("Warning: No find queries left in datapoint, this is not allowed") + print("Filtering function:", self.query_filter) + print("Datapoint:", datapoint) + raise ValueError + + # The deletion may have removed intermediate steps, so we need to remap to make them contiguous again + all_stages = sorted( + list(set(q.query_processing_order for q in datapoint.find_queries)) + ) + stage_map = {qpo: i for i, qpo in enumerate(all_stages)} + for i in range(len(datapoint.find_queries)): + qpo = datapoint.find_queries[i].query_processing_order + datapoint.find_queries[i].query_processing_order = stage_map[qpo] + + # Final step, clear up objects that are not used anymore + for img_id in range(len(datapoint.images)): + all_objects_ids = set( + i + for find in datapoint.find_queries + for i in find.object_ids_output + if find.image_id == img_id + ) + unused_ids = ( + set(range(len(datapoint.images[img_id].objects))) - all_objects_ids + ) + for tgt_img_id, tgt_obj_id in self.query_filter.obj_ids_to_filter: + if tgt_img_id == img_id: + unused_ids.add(tgt_obj_id) + + if len(unused_ids) > 0: + old_objects = datapoint.images[img_id].objects + object_old_to_new_map = {} + new_objects = [] + for i, o in enumerate(old_objects): + if i not in unused_ids: + object_old_to_new_map[i] = len(new_objects) + new_objects.append(o) + + datapoint.images[img_id].objects = new_objects + + # Remap the outputs of the find queries + affected_find_queries_ids = set() + object_old_to_new_map_per_query = {} + for fid, find in enumerate(datapoint.find_queries): + if find.image_id == img_id: + old_object_ids_output = find.object_ids_output + object_old_to_new_map_per_query[fid] = {} + find.object_ids_output = [] + for oid, old_obj_id in enumerate(old_object_ids_output): + if old_obj_id not in unused_ids: + new_obj_id = 
object_old_to_new_map[old_obj_id] + find.object_ids_output.append(new_obj_id) + object_old_to_new_map_per_query[fid][oid] = ( + len(find.object_ids_output) - 1 + ) + affected_find_queries_ids.add(fid) + + # finally remove unused images + all_imgs_to_keep = set() + for f_q in datapoint.find_queries: + all_imgs_to_keep.add(f_q.image_id) + + old_img_id_to_new_img_id = {} + new_images = [] + for img_id, img in enumerate(datapoint.images): + if img_id in all_imgs_to_keep: + old_img_id_to_new_img_id[img_id] = len(new_images) + new_images.append(img) + datapoint.images = new_images + + for f_q in datapoint.find_queries: + f_q.image_id = old_img_id_to_new_img_id[f_q.image_id] + + return datapoint + + +class AddPrefixSuffixToFindText: + """ + Add prefix or suffix strings to find query text on the fly. + + If `condition_on_text` is True, the prefix or suffix strings are only added + to those find query text in `condition_text_list` (case-insensitive). + """ + + def __init__( + self, + prefix: Optional[str] = None, + suffix: Optional[str] = None, + condition_on_text: bool = False, + condition_text_list: Optional[List[str]] = None, + enabled: bool = True, + ) -> None: + self.prefix = prefix + self.suffix = suffix + self.condition_on_text = condition_on_text + if self.condition_on_text: + assert condition_text_list is not None + self.condition_text_set = {s.lower().strip() for s in condition_text_list} + self.enabled = enabled + if self.enabled: + logging.info( + f"AddPrefixSuffixToFindText: prefix={prefix}, suffix={suffix}, " + f"condition_on_text={condition_on_text}, condition_text_list={condition_text_list}" + ) + + def __call__(self, datapoint, **kwargs): + if not self.enabled: + return datapoint + + for find in datapoint.find_queries: + if find.query_text == "geometric": + # skip geometric find queries + continue + if ( + self.condition_on_text + and find.query_text.lower().strip() not in self.condition_text_set + ): + # if condition_on_text is True, skip those queries not in condition_text_set + continue + + # add prefix and/or suffix strings to the find query text + if self.prefix is not None: + find.query_text = self.prefix + find.query_text + if self.suffix is not None: + find.query_text = find.query_text + self.suffix + + return datapoint + + +class FilterCrowds(FilterDataPointQueries): + def identify_queries_to_filter(self, datapoint: Datapoint) -> None: + """ + Compute set of query ids to keep, for both find and get queries + """ + self.obj_ids_to_filter = set() + self.find_ids_to_filter = set() + # self.get_ids_to_filter = set() + for img_id, img in enumerate(datapoint.images): + for obj_id, obj in enumerate(img.objects): + if obj.is_crowd: + self.obj_ids_to_filter.add((img_id, obj_id)) + + +class TextQueryToVisual: + """ + Transform a test query to a visual query (with some proba), using any of the output targets as the prompt + """ + + def __init__(self, probability, keep_text_queries=False) -> None: + self.probability = probability + assert 0 <= probability <= 1 + self.keep_text_queries = keep_text_queries + + def __call__(self, datapoint: Datapoint, **kwargs): + for find in datapoint.find_queries: + if find.input_bbox is not None or find.input_points is not None: + # skip geometric find queries + continue + + if len(find.object_ids_output) == 0: + # Can't create a visual query, skip + continue + + if find.query_processing_order > 0: + # Second stage query, can't use + continue + + if random.random() > self.probability: + continue + + selected_vq_id = 
random.choice(find.object_ids_output) + img_id = find.image_id + + find.input_bbox = datapoint.images[img_id].objects[selected_vq_id].bbox + find.input_bbox_label = torch.ones(1, dtype=torch.bool) + if not self.keep_text_queries: + find.query_text = "visual" + + return datapoint + + +class RemoveInputBoxes: + """ + Remove input boxes from find queries + """ + + def __init__(self) -> None: + pass + + def __call__(self, datapoint: Datapoint, **kwargs): + for find in datapoint.find_queries: + if find.input_bbox is None: + continue + + if find.query_text == "geometric": + print("Warning: removing input box from geometric find query") + + find.input_bbox = None + return datapoint + + +class OverwriteTextQuery: + """ + With some probability, overwrite the text query with a custom text + """ + + def __init__(self, target_text, probability=1.0) -> None: + self.probability = probability + self.target_text = target_text + assert 0 <= probability <= 1 + + def __call__(self, datapoint: Datapoint, **kwargs): + for find in datapoint.find_queries: + if random.random() > self.probability: + continue + + find.query_text = self.target_text + + return datapoint diff --git a/detect_tools/sam3/sam3/train/transforms/point_sampling.py b/detect_tools/sam3/sam3/train/transforms/point_sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..e083fde416467fb73a1321b2d6734f591a567eab --- /dev/null +++ b/detect_tools/sam3/sam3/train/transforms/point_sampling.py @@ -0,0 +1,345 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import cv2 +import numpy as np +import torch +from PIL import Image as PILImage +from pycocotools import mask as mask_util + +from sam3.train.data.sam3_image_dataset import Datapoint +from torchvision.ops import masks_to_boxes + + +def sample_points_from_rle(rle, n_points, mode, box=None, normalize=True): + """ + Sample random points from a mask provided in COCO RLE format. 'mode' + 'mode' is in ["centered", "random_mask", "random_box"] + "centered": points are sampled farthest from the mask edges and each other + "random_mask": points are sampled uniformly from the mask + "random_box": points are sampled uniformly from the annotation's box + 'box' must be provided if 'mode' is "random_box". + If 'normalize' is true, points are in [0,1], relative to mask h,w. + """ + mask = np.ascontiguousarray(mask_util.decode(rle)) + points = sample_points_from_mask(mask, n_points, mode, box) + + if normalize: + h, w = mask.shape + norm = np.array([w, h, 1.0])[None, :] + points = points / norm + + return points + + +def sample_points_from_mask(mask, n_points, mode, box=None): + if mode == "centered": + points = center_positive_sample(mask, n_points) + elif mode == "random_mask": + points = uniform_positive_sample(mask, n_points) + elif mode == "random_box": + assert box is not None, "'random_box' mode requires a provided box." + points = uniform_sample_from_box(mask, box, n_points) + else: + raise ValueError(f"Unknown point sampling mode {mode}.") + return points + + +def uniform_positive_sample(mask, n_points): + """ + Samples positive points uniformly from the mask. Only integer pixel + values are sampled. + """ + # Sampling directly from the uncompressed RLE would be faster but is + # likely unnecessary. + mask_points = np.stack(np.nonzero(mask), axis=0).transpose(1, 0) + assert len(mask_points) > 0, "Can't sample positive points from an empty mask." 
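+        # Indices are drawn with replacement, so duplicate points are possible when
+        # n_points exceeds the number of foreground pixels. The returned array has
+        # shape (n_points, 3) with columns [x, y, label], where label is always 1.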
+ selected_idxs = np.random.randint(low=0, high=len(mask_points), size=n_points) + selected_points = mask_points[selected_idxs] + + selected_points = selected_points[:, ::-1] # (y, x) -> (x, y) + labels = np.ones((len(selected_points), 1)) + selected_points = np.concatenate([selected_points, labels], axis=1) + + return selected_points + + +def center_positive_sample(mask, n_points): + """ + Samples points farthest from mask edges (by distance transform) + and subsequent points also farthest from each other. Each new point + sampled is treated as an edge for future points. Edges of the image are + treated as edges of the mask. + """ + + # Pad mask by one pixel on each end to assure distance transform + # avoids edges + padded_mask = np.pad(mask, 1) + + points = [] + for _ in range(n_points): + assert np.max(mask) > 0, "Can't sample positive points from an empty mask." + dist = cv2.distanceTransform(padded_mask, cv2.DIST_L2, 0) + point = np.unravel_index(dist.argmax(), dist.shape) + # Mark selected point as background so next point avoids it + padded_mask[point[0], point[1]] = 0 + points.append(point[::-1]) # (y, x) -> (x, y) + + points = np.stack(points, axis=0) + points = points - 1 # Subtract left/top padding of 1 + labels = np.ones((len(points), 1)) + points = np.concatenate([points, labels], axis=1) + + return points + + +def uniform_sample_from_box(mask, box, n_points): + """ + Sample points uniformly from the provided box. The points' labels + are determined by the provided mask. Does not guarantee a positive + point is sampled. The box is assumed unnormalized in XYXY format. + Points are sampled at integer values. + """ + + # Since lower/right edges are exclusive, ceil can be applied to all edges + int_box = np.ceil(box) + + x = np.random.randint(low=int_box[0], high=int_box[2], size=n_points) + y = np.random.randint(low=int_box[1], high=int_box[3], size=n_points) + labels = mask[y, x] + points = np.stack([x, y, labels], axis=1) + + return points + + +def rescale_box_xyxy(box, factor, imsize=None): + """ + Rescale a box providing in unnormalized XYXY format, fixing the center. + If imsize is provided, clamp to the image. + """ + cx, cy = (box[0] + box[2]) / 2, (box[1] + box[3]) / 2 + w, h = box[2] - box[0], box[3] - box[1] + + new_w, new_h = factor * w, factor * h + + new_x0, new_y0 = cx - new_w / 2, cy - new_h / 2 + new_x1, new_y1 = cx + new_w / 2, cy + new_h / 2 + + if imsize is not None: + new_x0 = max(min(new_x0, imsize[1]), 0) + new_x1 = max(min(new_x1, imsize[1]), 0) + new_y0 = max(min(new_y0, imsize[0]), 0) + new_y1 = max(min(new_y1, imsize[0]), 0) + + return [new_x0, new_y0, new_x1, new_y1] + + +def noise_box(box, im_size, box_noise_std, box_noise_max, min_box_area): + if box_noise_std <= 0.0: + return box + noise = box_noise_std * torch.randn(size=(4,)) + w, h = box[2] - box[0], box[3] - box[1] + scale_factor = torch.tensor([w, h, w, h]) + noise = noise * scale_factor + if box_noise_max is not None: + noise = torch.clamp(noise, -box_noise_max, box_noise_max) + input_box = box + noise + # Clamp to maximum image size + img_clamp = torch.tensor([im_size[1], im_size[0], im_size[1], im_size[0]]) + input_box = torch.maximum(input_box, torch.zeros_like(input_box)) + input_box = torch.minimum(input_box, img_clamp) + if (input_box[2] - input_box[0]) * (input_box[3] - input_box[1]) <= min_box_area: + return box + + return input_box + + +class RandomGeometricInputsAPI: + """ + For geometric queries, replaces the input box or points with a random + one sampled from the GT mask. 
Segments must be provided for objects + that are targets of geometric queries, and must be binary masks. Existing + point and box queries in the datapoint will be ignored and completely replaced. + Will sample points and boxes in XYXY format in absolute pixel space. + + Geometry queries are currently determined by taking any query whose + query text is a set value. + + Args: + num_points (int or (int, int)): how many points to sample. If a tuple, + sample a random number of points uniformly over the inclusive range. + box_chance (float): fraction of time a box is sampled. A box will replace + one sampled point. + box_noise_std (float): if greater than 0, add noise to the sampled boxes + with this std. Noise is relative to the length of the box side. + box_noise_max (int): if not none, truncate any box noise larger than this + in terms of absolute pixels. + resample_box_from_mask (bool): if True, any sampled box will be determined + by finding the extrema of the provided mask. If False, the bbox provided + in the target object will be used. + point_sample_mode (str): In ["centered", "random_mask", "random_box"], + controlling how points are sampled: + "centered": points are sampled farthest from the mask edges and each other + "random_mask": points are sampled uniformly from the mask + "random_box": points are sampled uniformly from the annotation's box + Note that "centered" may be too slow for on-line generation. + geometric_query_str (str): what string in query_text indicates a + geometry query. + minimum_box_area (float): sampled boxes with area this size or smaller after + noising will use the original box instead. It is the input's responsibility + to avoid original boxes that violate necessary area bounds. + concat_points (bool): if True, any sampled points will be added to existing + ones instead of replacing them. + + """ + + def __init__( + self, + num_points, + box_chance, + box_noise_std=0.0, + box_noise_max=None, + minimum_box_area=0.0, + resample_box_from_mask=False, + point_sample_mode="random_mask", + sample_box_scale_factor=1.0, + geometric_query_str="geometric", + concat_points=False, + ): + self.num_points = num_points + if not isinstance(self.num_points, int): + # Convert from inclusive range to exclusive range expected by torch + self.num_points[1] += 1 + self.num_points = tuple(self.num_points) + self.box_chance = box_chance + self.box_noise_std = box_noise_std + self.box_noise_max = box_noise_max + self.minimum_box_area = minimum_box_area + self.resample_box_from_mask = resample_box_from_mask + self.point_sample_mode = point_sample_mode + assert point_sample_mode in [ + "centered", + "random_mask", + "random_box", + ], "Unknown point sample mode." 
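+        # Illustrative configuration (example values, not defaults from this file):
+        #   RandomGeometricInputsAPI(num_points=[1, 3], box_chance=0.5,
+        #                            box_noise_std=0.1, box_noise_max=20)
+        # samples 1-3 point prompts per geometric query and, half of the time,
+        # trades one point for a noised copy of the target object's box.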
+ self.geometric_query_str = geometric_query_str + self.concat_points = concat_points + self.sample_box_scale_factor = sample_box_scale_factor + + def _sample_num_points_and_if_box(self): + if isinstance(self.num_points, tuple): + n_points = torch.randint( + low=self.num_points[0], high=self.num_points[1], size=(1,) + ).item() + else: + n_points = self.num_points + if self.box_chance > 0.0: + use_box = torch.rand(size=(1,)).item() < self.box_chance + n_points -= int(use_box) # box stands in for one point + else: + use_box = False + return n_points, use_box + + def _get_original_box(self, target_object): + if not self.resample_box_from_mask: + return target_object.bbox + mask = target_object.segment + return masks_to_boxes(mask[None, :, :])[0] + + def _get_target_object(self, datapoint, query): + img = datapoint.images[query.image_id] + targets = query.object_ids_output + assert ( + len(targets) == 1 + ), "Geometric queries only support a single target object." + target_idx = targets[0] + return img.objects[target_idx] + + def __call__(self, datapoint, **kwargs): + for query in datapoint.find_queries: + if query.query_text != self.geometric_query_str: + continue + + target_object = self._get_target_object(datapoint, query) + n_points, use_box = self._sample_num_points_and_if_box() + box = self._get_original_box(target_object) + + mask = target_object.segment + if n_points > 0: + # FIXME: The conversion to numpy and back to reuse code + # is awkward, but this is all in the dataloader worker anyway + # on CPU and so I don't think it should matter. + if self.sample_box_scale_factor != 1.0: + sample_box = rescale_box_xyxy( + box.numpy(), self.sample_box_scale_factor, mask.shape + ) + else: + sample_box = box.numpy() + input_points = sample_points_from_mask( + mask.numpy(), + n_points, + self.point_sample_mode, + sample_box, + ) + input_points = torch.as_tensor(input_points) + input_points = input_points[None, :, :] + if self.concat_points and query.input_points is not None: + input_points = torch.cat([query.input_points, input_points], dim=1) + else: + input_points = query.input_points if self.concat_points else None + + if use_box: + w, h = datapoint.images[query.image_id].size + input_box = noise_box( + box, + (h, w), + box_noise_std=self.box_noise_std, + box_noise_max=self.box_noise_max, + min_box_area=self.minimum_box_area, + ) + input_box = input_box[None, :] + else: + input_box = query.input_bbox if self.concat_points else None + + query.input_points = input_points + query.input_bbox = input_box + + return datapoint + + +class RandomizeInputBbox: + """ + Simplified version of the geometric transform that only deals with input boxes + """ + + def __init__( + self, + box_noise_std=0.0, + box_noise_max=None, + minimum_box_area=0.0, + ): + self.box_noise_std = box_noise_std + self.box_noise_max = box_noise_max + self.minimum_box_area = minimum_box_area + + def __call__(self, datapoint: Datapoint, **kwargs): + for query in datapoint.find_queries: + if query.input_bbox is None: + continue + + img = datapoint.images[query.image_id].data + if isinstance(img, PILImage.Image): + w, h = img.size + else: + assert isinstance(img, torch.Tensor) + h, w = img.shape[-2:] + + for box_id in range(query.input_bbox.shape[0]): + query.input_bbox[box_id, :] = noise_box( + query.input_bbox[box_id, :].view(4), + (h, w), + box_noise_std=self.box_noise_std, + box_noise_max=self.box_noise_max, + min_box_area=self.minimum_box_area, + ).view(1, 4) + + return datapoint diff --git 
a/detect_tools/sam3/sam3/train/transforms/segmentation.py b/detect_tools/sam3/sam3/train/transforms/segmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..3e97db0f36b46ac99ad750c3bd4b8e55810f8f15 --- /dev/null +++ b/detect_tools/sam3/sam3/train/transforms/segmentation.py @@ -0,0 +1,157 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import numpy as np +import pycocotools.mask as mask_utils +import torch + +import torchvision.transforms.functional as F +from PIL import Image as PILImage + +from sam3.model.box_ops import masks_to_boxes + +from sam3.train.data.sam3_image_dataset import Datapoint + + +class InstanceToSemantic(object): + """Convert instance segmentation to semantic segmentation.""" + + def __init__(self, delete_instance=True, use_rle=False): + self.delete_instance = delete_instance + self.use_rle = use_rle + + def __call__(self, datapoint: Datapoint, **kwargs): + for fquery in datapoint.find_queries: + h, w = datapoint.images[fquery.image_id].size + + if self.use_rle: + all_segs = [ + datapoint.images[fquery.image_id].objects[obj_id].segment + for obj_id in fquery.object_ids_output + ] + if len(all_segs) > 0: + # we need to double check that all rles are the correct size + # Otherwise cocotools will fail silently to an empty [0,0] mask + for seg in all_segs: + assert seg["size"] == all_segs[0]["size"], ( + "Instance segments have inconsistent sizes. " + f"Found sizes {seg['size']} and {all_segs[0]['size']}" + ) + fquery.semantic_target = mask_utils.merge(all_segs) + else: + # There is no good way to create an empty RLE of the correct size + # We resort to converting an empty box to RLE + fquery.semantic_target = mask_utils.frPyObjects( + np.array([[0, 0, 0, 0]], dtype=np.float64), h, w + )[0] + + else: + # `semantic_target` is uint8 and remains uint8 throughout the transforms + # (it contains binary 0 and 1 values just like `segment` for each object) + fquery.semantic_target = torch.zeros((h, w), dtype=torch.uint8) + for obj_id in fquery.object_ids_output: + segment = datapoint.images[fquery.image_id].objects[obj_id].segment + if segment is not None: + assert ( + isinstance(segment, torch.Tensor) + and segment.dtype == torch.uint8 + ) + fquery.semantic_target |= segment + + if self.delete_instance: + for img in datapoint.images: + for obj in img.objects: + del obj.segment + obj.segment = None + + return datapoint + + +class RecomputeBoxesFromMasks: + """Recompute bounding boxes from masks.""" + + def __call__(self, datapoint: Datapoint, **kwargs): + for img in datapoint.images: + for obj in img.objects: + # Note: if the mask is empty, the bounding box will be undefined + # The empty targets should be subsequently filtered + obj.bbox = masks_to_boxes(obj.segment) + obj.area = obj.segment.sum().item() + + return datapoint + + +class DecodeRle: + """This transform decodes RLEs into binary segments. + Implementing it as a transforms allows lazy loading. Some transforms (eg query filters) + may be deleting masks, so decoding them from the beginning is wasteful. 
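+    Empty RLE masks are approximated from the object's bounding box, and decoded
+    segments whose size disagrees with the image size are resized to match it.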
+ + This transforms needs to be called before any kind of geometric manipulation + """ + + def __call__(self, datapoint: Datapoint, **kwargs): + imgId2size = {} + warning_shown = False + for imgId, img in enumerate(datapoint.images): + if isinstance(img.data, PILImage.Image): + img_w, img_h = img.data.size + elif isinstance(img.data, torch.Tensor): + img_w, img_h = img.data.shape[-2:] + else: + raise RuntimeError(f"Unexpected image type {type(img.data)}") + + imgId2size[imgId] = (img_h, img_w) + + for obj in img.objects: + if obj.segment is not None and not isinstance( + obj.segment, torch.Tensor + ): + if mask_utils.area(obj.segment) == 0: + print("Warning, empty mask found, approximating from box") + obj.segment = torch.zeros(img_h, img_w, dtype=torch.uint8) + x1, y1, x2, y2 = obj.bbox.int().tolist() + obj.segment[y1 : max(y2, y1 + 1), x1 : max(x1 + 1, x2)] = 1 + else: + obj.segment = mask_utils.decode(obj.segment) + # segment is uint8 and remains uint8 throughout the transforms + obj.segment = torch.tensor(obj.segment).to(torch.uint8) + + if list(obj.segment.shape) != [img_h, img_w]: + # Should not happen often, but adding for security + if not warning_shown: + print( + f"Warning expected instance segmentation size to be {[img_h, img_w]} but found {list(obj.segment.shape)}" + ) + # Printing only once per datapoint to avoid spam + warning_shown = True + + obj.segment = F.resize( + obj.segment[None], (img_h, img_w) + ).squeeze(0) + + assert list(obj.segment.shape) == [img_h, img_w] + + warning_shown = False + for query in datapoint.find_queries: + if query.semantic_target is not None and not isinstance( + query.semantic_target, torch.Tensor + ): + query.semantic_target = mask_utils.decode(query.semantic_target) + # segment is uint8 and remains uint8 throughout the transforms + query.semantic_target = torch.tensor(query.semantic_target).to( + torch.uint8 + ) + if tuple(query.semantic_target.shape) != imgId2size[query.image_id]: + if not warning_shown: + print( + f"Warning expected semantic segmentation size to be {imgId2size[query.image_id]} but found {tuple(query.semantic_target.shape)}" + ) + # Printing only once per datapoint to avoid spam + warning_shown = True + + query.semantic_target = F.resize( + query.semantic_target[None], imgId2size[query.image_id] + ).squeeze(0) + + assert tuple(query.semantic_target.shape) == imgId2size[query.image_id] + + return datapoint diff --git a/detect_tools/sam3/sam3/train/utils/__init__.py b/detect_tools/sam3/sam3/train/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..46d37d2aaef52ec7de01999516a2b8c3e1fa4986 --- /dev/null +++ b/detect_tools/sam3/sam3/train/utils/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved diff --git a/detect_tools/sam3/sam3/train/utils/checkpoint_utils.py b/detect_tools/sam3/sam3/train/utils/checkpoint_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7f2736adfd593d50afb91afd57e391423979d173 --- /dev/null +++ b/detect_tools/sam3/sam3/train/utils/checkpoint_utils.py @@ -0,0 +1,358 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
All Rights Reserved + + +import contextlib +import fnmatch +import logging +from typing import ( + Any, + Callable, + Dict, + List, + Mapping, + Optional, + Sequence, + Set, + Tuple, + Union, +) + +import numpy as np +import torch +import torch.nn as nn +from iopath.common.file_io import g_pathmgr +from torch.jit._script import RecursiveScriptModule + + +def unix_pattern_to_parameter_names( + constraints: List[str], all_parameter_names: Sequence[str] +) -> Union[None, Set[str]]: + """ + Go through the list of parameter names and select those that match + any of the provided constraints + """ + parameter_names = [] + for param_name in constraints: + matching_parameters = set(fnmatch.filter(all_parameter_names, param_name)) + assert ( + len(matching_parameters) > 0 + ), f"param_names {param_name} don't match any param in the given names." + parameter_names.append(matching_parameters) + return set.union(*parameter_names) + + +def filter_params_matching_unix_pattern( + patterns: List[str], state_dict: Dict[str, torch.Tensor] +) -> Dict[str, torch.Tensor]: + """ + Remove from the state dictionary the parameters matching the provided unix patterns + + Args: + patterns: the list of unix patterns to exclude + state_dict: the dictionary to filter + + Returns: + A new state dictionary + """ + if len(patterns) == 0: + return {} + + all_keys = list(state_dict.keys()) + included_keys = unix_pattern_to_parameter_names(patterns, all_keys) + return {k: state_dict[k] for k in included_keys} + + +def exclude_params_matching_unix_pattern( + patterns: List[str], state_dict: Dict[str, torch.Tensor] +) -> Dict[str, torch.Tensor]: + """ + Remove from the state dictionary the parameters matching the provided unix patterns + + Args: + patterns: the list of unix patterns to exclude + state_dict: the dictionary to filter + + Returns: + A new state dictionary + """ + if len(patterns) == 0: + return state_dict + + all_keys = list(state_dict.keys()) + excluded_keys = unix_pattern_to_parameter_names(patterns, all_keys) + return {k: v for k, v in state_dict.items() if k not in excluded_keys} + + +def _get_state_dict_summary(state_dict: Dict[str, torch.Tensor]): + keys = [] + trace = [] + for k, v in state_dict.items(): + keys.append(k) + trace.append(v.sum().item()) + trace = np.array(trace)[np.argsort(keys)] + return trace + + +def assert_skipped_parameters_are_frozen(model: nn.Module, patterns: List[str]): + """ + Verifies that all the parameters matching the provided patterns + are frozen - this acts as a safeguard when ignoring parameter + when saving checkpoints - if the parameters are in fact trainable + """ + if not patterns: + return + + frozen_state_dict = filter_params_matching_unix_pattern( + patterns=patterns, state_dict=model.state_dict() + ) + non_frozen_keys = { + n + for n, p in model.named_parameters() + if n in frozen_state_dict and p.requires_grad + } + if non_frozen_keys: + raise ValueError( + f"Parameters excluded with `skip_saving_parameters` should be frozen: {non_frozen_keys}" + ) + + +@contextlib.contextmanager +def with_check_parameter_frozen( + model: nn.Module, patterns: List[str], disabled: bool = True +): + """ + Context manager that inspects a model surrounding a piece of code + and verifies if the model has been updated by this piece of code + + The function will raise an exception if the model has been updated + on at least one of the parameter that matches one of the pattern + + Args: + model: the model that might have been updated + patterns: for the parameters we want to observe + 
allowed: + """ + if not patterns or disabled: + yield + return + + frozen_state_dict = filter_params_matching_unix_pattern( + patterns=patterns, state_dict=model.state_dict() + ) + summary_before = _get_state_dict_summary(frozen_state_dict) + + yield + + frozen_state_dict = filter_params_matching_unix_pattern( + patterns=patterns, state_dict=model.state_dict() + ) + summary_after = _get_state_dict_summary(frozen_state_dict) + + if not np.allclose(summary_before, summary_after, atol=1e-6): + raise ValueError( + f""" + The `model_weight_initializer` has initialized parameters frozen with `skip_saving_parameters`. + You can resolve this error by either initializing those parameters from within the model definition + or using the flag `trainer.checkpoint.initialize_after_preemption` to True. + """ + ) + + +class CkptExcludeKernel: + """ + Removes the keys from the given model state_dict that match the key_pattern. + + Args: + key_pattern: Patterns used to select the keys in the state_dict + that are eligible for this kernel. + """ + + def __init__(self, key_pattern: List[str]): + self.key_pattern = key_pattern + + def __call__(self, state_dict: Dict): + """ + Args: + state_dict: A dictionary representing the given checkpoint's state dict. + """ + if len(self.key_pattern) == 0: + return state_dict + exclude_keys = unix_pattern_to_parameter_names( + self.key_pattern, state_dict.keys() + ) + return {k: v for k, v in state_dict.items() if k not in exclude_keys} + + +def load_checkpoint( + path_list: List[str], + pick_recursive_keys: Optional[List[str]] = None, + map_location: str = "cpu", +) -> Any: + """ + Loads a checkpoint from the specified path. + + Args: + path_list: A list of paths which contain the checkpoint. Each element + is tried (in order) until a file that exists is found. That file is then + used to read the checkpoint. + pick_recursive_keys: Picks sub dicts from the loaded checkpoint if not None. + For pick_recursive_keys = ["a", "b"], will return checkpoint_dict["a"]["b"] + map_location (str): a function, torch.device, string or a dict specifying how to + remap storage locations + + Returns: Model with the matchin pre-trained weights loaded. 
+ """ + path_exists = False + for path in path_list: + if g_pathmgr.exists(path): + path_exists = True + break + + if not path_exists: + raise ValueError(f"No path exists in {path_list}") + + with g_pathmgr.open(path, "rb") as f: + checkpoint = torch.load(f, map_location=map_location) + + logging.info(f"Loaded checkpoint from {path}") + if pick_recursive_keys is not None: + for key in pick_recursive_keys: + checkpoint = checkpoint[key] + return checkpoint + + +def get_state_dict(checkpoint, ckpt_state_dict_keys): + if isinstance(checkpoint, RecursiveScriptModule): + # This is a torchscript JIT model + return checkpoint.state_dict() + pre_train_dict = checkpoint + for i, key in enumerate(ckpt_state_dict_keys): + if (isinstance(pre_train_dict, Mapping) and key not in pre_train_dict) or ( + isinstance(pre_train_dict, Sequence) and key >= len(pre_train_dict) + ): + key_str = ( + '["' + '"]["'.join(list(map(ckpt_state_dict_keys[:i], str))) + '"]' + ) + raise KeyError( + f"'{key}' not found in checkpoint{key_str} " + f"with keys: {pre_train_dict.keys()}" + ) + pre_train_dict = pre_train_dict[key] + return pre_train_dict + + +def load_checkpoint_and_apply_kernels( + checkpoint_path: str, + checkpoint_kernels: List[Callable] = None, + ckpt_state_dict_keys: Tuple[str] = ("state_dict",), + map_location: str = "cpu", +) -> nn.Module: + """ + Performs checkpoint loading with a variety of pre-processing kernel applied in + sequence. + + Args: + checkpoint_path (str): Path to the checkpoint. + checkpoint_kernels List(Callable): A list of checkpoint processing kernels + to apply in the specified order. Supported kernels include `CkptIncludeKernel`, + `CkptExcludeKernel`, etc. These kernels are applied in the + given order. + ckpt_state_dict_keys (str): Keys containing the model state dict. + map_location (str): a function, torch.device, string or a dict specifying how to + remap storage locations + + Returns: Model with the matchin pre-trained weights loaded. + """ + assert g_pathmgr.exists(checkpoint_path), "Checkpoint '{}' not found".format( + checkpoint_path + ) + + # Load the checkpoint on CPU to avoid GPU mem spike. + with g_pathmgr.open(checkpoint_path, "rb") as f: + checkpoint = torch.load(f, map_location=map_location) + + pre_train_dict = get_state_dict(checkpoint, ckpt_state_dict_keys) + + # Not logging into info etc since it's a huge log + logging.debug( + "Loaded Checkpoint State Dict pre-kernel application: %s" + % str(", ".join(list(pre_train_dict.keys()))) + ) + # Apply kernels + if checkpoint_kernels is not None: + for f in checkpoint_kernels: + pre_train_dict = f(state_dict=pre_train_dict) + + logging.debug( + "Loaded Checkpoint State Dict Post-kernel application %s" + % str(", ".join(list(pre_train_dict.keys()))) + ) + + return pre_train_dict + + +def check_load_state_dict_errors( + missing_keys, + unexpected_keys, + strict: bool, + ignore_missing_keys: List[str] = None, + ignore_unexpected_keys: List[str] = None, +): + if ignore_missing_keys is not None and len(ignore_missing_keys) > 0: + ignored_keys = unix_pattern_to_parameter_names( + ignore_missing_keys, missing_keys + ) + missing_keys = [key for key in missing_keys if key not in ignored_keys] + + if ignore_unexpected_keys is not None and len(ignore_unexpected_keys) > 0: + ignored_unexpected_keys = unix_pattern_to_parameter_names( + ignore_unexpected_keys, unexpected_keys + ) + unexpected_keys = [ + key for key in unexpected_keys if key not in ignored_unexpected_keys + ] + + err = "State key mismatch." 
+ if unexpected_keys: + err += f" Unexpected keys: {unexpected_keys}." + if missing_keys: + err += f" Missing keys: {missing_keys}." + + if unexpected_keys or missing_keys: + logging.warning(err) + if unexpected_keys or strict: + raise KeyError(err) + + +def load_state_dict_into_model( + state_dict: Dict, + model: nn.Module, + strict: bool = True, + ignore_missing_keys: List[str] = None, + ignore_unexpected_keys: List[str] = None, + checkpoint_kernels: List[Callable] = None, +): + """ + Loads a state dict into the given model. + + Args: + state_dict: A dictionary containing the model's + state dict, or a subset if strict is False + model: Model to load the checkpoint weights into + strict: raise if the state_dict has missing state keys + ignore_missing_keys: unix pattern of keys to ignore + """ + # Apply kernels + if checkpoint_kernels is not None: + for f in checkpoint_kernels: + state_dict = f(state_dict=state_dict) + missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) + + check_load_state_dict_errors( + missing_keys, + unexpected_keys, + strict=strict, + ignore_missing_keys=ignore_missing_keys, + ignore_unexpected_keys=ignore_unexpected_keys, + ) + return model diff --git a/detect_tools/sam3/sam3/train/utils/distributed.py b/detect_tools/sam3/sam3/train/utils/distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..3c87a91119c1fa1ebee78cf476c8db6c737053a6 --- /dev/null +++ b/detect_tools/sam3/sam3/train/utils/distributed.py @@ -0,0 +1,585 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import datetime +import functools +import io +import logging +import os +import random +import tempfile +import time +from typing import Any, Callable, List, Tuple + +import torch +import torch.autograd as autograd +import torch.distributed as dist + + +# Default to GPU 0 +_cuda_device_index: int = 0 + +# Setting _cuda_device_index to -1 internally implies that we should use CPU +_CPU_DEVICE_INDEX = -1 +_PRIMARY_RANK = 0 + + +@functools.lru_cache() +def _get_global_gloo_group(): + """ + Return a process group based on gloo backend, containing all the ranks + The result is cached. + """ + + if dist.get_backend() == "nccl": + # Increase timeout from 1800 sec to 43200 sec (12 hr) to avoid some processes + # being much slower than others causing a timeout (which can happen in relation + # or LVIS class mAP evaluation). + timeout = 43200 + return dist.new_group( + backend="gloo", + timeout=datetime.timedelta(seconds=timeout), + ) + + return dist.group.WORLD + + +def is_main_process(): + """Return true if the current process is the main one""" + return get_rank() == 0 + + +def all_gather_via_filesys(data, filesys_save_dir=None, gather_to_rank_0_only=False): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors), similar to + `all_gather` above, but using filesystem instead of collective ops. + + If gather_to_rank_0_only is True, only rank 0 will load the gathered object list + (and other ranks will have an empty list). 
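+    Each rank pickles its payload into a shared directory (filesys_save_dir,
+    $EXP_DIR, or this file's directory), waits on a barrier, the reading rank(s)
+    load every rank's file, and each rank then deletes its own file.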
+ """ + world_size = get_world_size() + if world_size == 1: + return [data] + + print("gathering via files") + cpu_group = _get_global_gloo_group() + + # if unspecified, we will save to the current python file dir + if filesys_save_dir is not None: + save_dir = filesys_save_dir + elif "EXP_DIR" in os.environ: + save_dir = os.environ["EXP_DIR"] + else: + # try the same directory where the code is stored + save_dir = filesys_save_dir or os.path.dirname(__file__) + save_dir = os.path.join(save_dir, "all_gather_via_filesys") + if is_main_process(): + os.makedirs(save_dir, exist_ok=True) + + # use a timestamp and salt to distinguish different all_gather + timestamp = int(time.time()) if is_main_process() else 0 + salt = random.randint(0, 2**31 - 1) if is_main_process() else 0 + # broadcast the timestamp and salt across ranks + # (all-reduce will do the broadcasting since only rank 0 is non-zero) + timestamp_and_salt = torch.tensor([timestamp, salt], dtype=torch.long) + dist.all_reduce(timestamp_and_salt, group=cpu_group) + timestamp, salt = timestamp_and_salt.tolist() + + # save the data to a file on the disk + rank_save = get_rank() + save_data_filename = f"data_to_gather_{timestamp}_{salt}_{rank_save}.pkl" + save_data_path = os.path.join(save_dir, save_data_filename) + assert not os.path.exists(save_data_path), f"{save_data_path} already exists" + torch.save(data, save_data_path) + dist.barrier(group=cpu_group) + + # read the data from the files + data_list = [] + if rank_save == 0 or not gather_to_rank_0_only: + for rank_load in range(world_size): + load_data_filename = f"data_to_gather_{timestamp}_{salt}_{rank_load}.pkl" + load_data_path = os.path.join(save_dir, load_data_filename) + assert os.path.exists(load_data_path), f"cannot read {save_data_path}" + data_list.append(torch.load(load_data_path, weights_only=False)) + dist.barrier(group=cpu_group) + + # delete the saved file + os.remove(save_data_path) + return data_list + + +def all_gather(data, force_cpu=False, force_filesys=False, filesys_save_dir=None): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors) + Args: + data: any picklable object + Returns: + list[data]: list of data gathered from each rank + """ + + world_size = get_world_size() + if world_size == 1: + return [data] + + if os.getenv("MDETR_FILESYS_REDUCE_RANK_0_ONLY") == "1": + return all_gather_via_filesys( + data, filesys_save_dir, gather_to_rank_0_only=True + ) + + if os.getenv("MDETR_FILESYS_REDUCE") == "1" or force_filesys: + return all_gather_via_filesys(data, filesys_save_dir) + + cpu_group = None + if os.getenv("MDETR_CPU_REDUCE") == "1" or force_cpu: + cpu_group = _get_global_gloo_group() + + buffer = io.BytesIO() + torch.save(data, buffer) + data_view = buffer.getbuffer() + device = "cuda" if cpu_group is None else "cpu" + tensor = torch.ByteTensor(data_view).to(device) + + # obtain Tensor size of each rank + local_size = torch.tensor([tensor.numel()], device=device, dtype=torch.long) + size_list = [ + torch.tensor([0], device=device, dtype=torch.long) for _ in range(world_size) + ] + if cpu_group is None: + dist.all_gather(size_list, local_size) + else: + print("gathering on cpu") + dist.all_gather(size_list, local_size, group=cpu_group) + size_list = [int(size.item()) for size in size_list] + max_size = max(size_list) + assert isinstance(local_size.item(), int) + local_size = int(local_size.item()) + + # receiving Tensor from all ranks + # we pad the tensor because torch all_gather does not support + # gathering tensors of different 
shapes + tensor_list = [] + for _ in size_list: + tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=device)) + if local_size != max_size: + padding = torch.empty( + size=(max_size - local_size,), dtype=torch.uint8, device=device + ) + tensor = torch.cat((tensor, padding), dim=0) + if cpu_group is None: + dist.all_gather(tensor_list, tensor) + else: + dist.all_gather(tensor_list, tensor, group=cpu_group) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + tensor = torch.split(tensor, [size, max_size - size], dim=0)[0] + buffer = io.BytesIO(tensor.cpu().numpy()) + obj = torch.load(buffer, weights_only=False) + data_list.append(obj) + + return data_list + + +def convert_to_distributed_tensor(tensor: torch.Tensor) -> Tuple[torch.Tensor, str]: + """ + For some backends, such as NCCL, communication only works if the + tensor is on the GPU. This helper function converts to the correct + device and returns the tensor + original device. + """ + orig_device = "cpu" if not tensor.is_cuda else "gpu" + if ( + torch.distributed.is_available() + and torch.distributed.get_backend() == torch.distributed.Backend.NCCL + and not tensor.is_cuda + ): + tensor = tensor.cuda() + return (tensor, orig_device) + + +def convert_to_normal_tensor(tensor: torch.Tensor, orig_device: str) -> torch.Tensor: + """ + For some backends, such as NCCL, communication only works if the + tensor is on the GPU. This converts the tensor back to original device. + """ + if tensor.is_cuda and orig_device == "cpu": + tensor = tensor.cpu() + return tensor + + +def is_distributed_training_run() -> bool: + return ( + torch.distributed.is_available() + and torch.distributed.is_initialized() + and (torch.distributed.get_world_size() > 1) + ) + + +def is_primary() -> bool: + """ + Returns True if this is rank 0 of a distributed training job OR if it is + a single trainer job. Otherwise False. + """ + return get_rank() == _PRIMARY_RANK + + +def all_reduce_mean(tensor: torch.Tensor) -> torch.Tensor: + """ + Wrapper over torch.distributed.all_reduce for performing mean reduction + of tensor over all processes. + """ + return all_reduce_op( + tensor, + torch.distributed.ReduceOp.SUM, + lambda t: t / torch.distributed.get_world_size(), + ) + + +def all_reduce_sum(tensor: torch.Tensor) -> torch.Tensor: + """ + Wrapper over torch.distributed.all_reduce for performing sum + reduction of tensor over all processes in both distributed / + non-distributed scenarios. + """ + return all_reduce_op(tensor, torch.distributed.ReduceOp.SUM) + + +def all_reduce_min(tensor: torch.Tensor) -> torch.Tensor: + """ + Wrapper over torch.distributed.all_reduce for performing min + reduction of tensor over all processes in both distributed / + non-distributed scenarios. + """ + return all_reduce_op(tensor, torch.distributed.ReduceOp.MIN) + + +def all_reduce_max(tensor: torch.Tensor) -> torch.Tensor: + """ + Wrapper over torch.distributed.all_reduce for performing min + reduction of tensor over all processes in both distributed / + non-distributed scenarios. + """ + return all_reduce_op(tensor, torch.distributed.ReduceOp.MAX) + + +def all_reduce_op( + tensor: torch.Tensor, + op: torch.distributed.ReduceOp, + after_op_func: Callable[[torch.Tensor], torch.Tensor] = None, +) -> torch.Tensor: + """ + Wrapper over torch.distributed.all_reduce for performing + reduction of tensor over all processes in both distributed / + non-distributed scenarios. 
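+    When no distributed process group is initialized, the input tensor is
+    returned unchanged and `after_op_func` is not applied.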
+ """ + if is_distributed_training_run(): + tensor, orig_device = convert_to_distributed_tensor(tensor) + torch.distributed.all_reduce(tensor, op) + if after_op_func is not None: + tensor = after_op_func(tensor) + tensor = convert_to_normal_tensor(tensor, orig_device) + return tensor + + +def gather_tensors_from_all(tensor: torch.Tensor) -> List[torch.Tensor]: + """ + Wrapper over torch.distributed.all_gather for performing + 'gather' of 'tensor' over all processes in both distributed / + non-distributed scenarios. + """ + if tensor.ndim == 0: + # 0 dim tensors cannot be gathered. so unsqueeze + tensor = tensor.unsqueeze(0) + + if is_distributed_training_run(): + tensor, orig_device = convert_to_distributed_tensor(tensor) + gathered_tensors = [ + torch.zeros_like(tensor) for _ in range(torch.distributed.get_world_size()) + ] + torch.distributed.all_gather(gathered_tensors, tensor) + gathered_tensors = [ + convert_to_normal_tensor(_tensor, orig_device) + for _tensor in gathered_tensors + ] + else: + gathered_tensors = [tensor] + + return gathered_tensors + + +def gather_from_all(tensor: torch.Tensor) -> torch.Tensor: + gathered_tensors = gather_tensors_from_all(tensor) + gathered_tensor = torch.cat(gathered_tensors, 0) + return gathered_tensor + + +def broadcast(tensor: torch.Tensor, src: int = 0) -> torch.Tensor: + """ + Wrapper over torch.distributed.broadcast for broadcasting a tensor from the source + to all processes in both distributed / non-distributed scenarios. + """ + if is_distributed_training_run(): + tensor, orig_device = convert_to_distributed_tensor(tensor) + torch.distributed.broadcast(tensor, src) + tensor = convert_to_normal_tensor(tensor, orig_device) + return tensor + + +def barrier() -> None: + """ + Wrapper over torch.distributed.barrier, returns without waiting + if the distributed process group is not initialized instead of throwing error. 
+ """ + if not torch.distributed.is_available() or not torch.distributed.is_initialized(): + return + torch.distributed.barrier() + + +def get_world_size() -> int: + """ + Simple wrapper for correctly getting worldsize in both distributed + / non-distributed settings + """ + return ( + torch.distributed.get_world_size() + if torch.distributed.is_available() and torch.distributed.is_initialized() + else 1 + ) + + +def get_rank() -> int: + """ + Simple wrapper for correctly getting rank in both distributed + / non-distributed settings + """ + return ( + torch.distributed.get_rank() + if torch.distributed.is_available() and torch.distributed.is_initialized() + else 0 + ) + + +def get_primary_rank() -> int: + return _PRIMARY_RANK + + +def set_cuda_device_index(idx: int) -> None: + global _cuda_device_index + _cuda_device_index = idx + torch.cuda.set_device(_cuda_device_index) + + +def set_cpu_device() -> None: + global _cuda_device_index + _cuda_device_index = _CPU_DEVICE_INDEX + + +def get_cuda_device_index() -> int: + return _cuda_device_index + + +def init_distributed_data_parallel_model( + model: torch.nn.Module, + broadcast_buffers: bool = False, + find_unused_parameters: bool = True, + bucket_cap_mb: int = 25, +) -> torch.nn.parallel.DistributedDataParallel: + global _cuda_device_index + + if _cuda_device_index == _CPU_DEVICE_INDEX: + # CPU-only model, don't specify device + return torch.nn.parallel.DistributedDataParallel( + model, + broadcast_buffers=broadcast_buffers, + find_unused_parameters=find_unused_parameters, + bucket_cap_mb=bucket_cap_mb, + ) + else: + # GPU model + return torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[_cuda_device_index], + output_device=_cuda_device_index, + broadcast_buffers=broadcast_buffers, + find_unused_parameters=find_unused_parameters, + bucket_cap_mb=bucket_cap_mb, + ) + + +def broadcast_object(obj: Any, src: int = _PRIMARY_RANK, use_disk: bool = True) -> Any: + """Broadcast an object from a source to all workers. + + Args: + obj: Object to broadcast, must be serializable + src: Source rank for broadcast (default is primary) + use_disk: If enabled, removes redundant CPU memory copies by writing to + disk + """ + # Either broadcast from primary to the fleet (default), + # or use the src setting as the original rank + if get_rank() == src: + # Emit data + buffer = io.BytesIO() + torch.save(obj, buffer) + data_view = buffer.getbuffer() + length_tensor = torch.LongTensor([len(data_view)]) + length_tensor = broadcast(length_tensor, src=src) + data_tensor = torch.ByteTensor(data_view) + data_tensor = broadcast(data_tensor, src=src) + else: + # Fetch from the source + length_tensor = torch.LongTensor([0]) + length_tensor = broadcast(length_tensor, src=src) + data_tensor = torch.empty([length_tensor.item()], dtype=torch.uint8) + data_tensor = broadcast(data_tensor, src=src) + if use_disk: + with tempfile.TemporaryFile("r+b") as f: + f.write(data_tensor.numpy()) + # remove reference to the data tensor and hope that Python garbage + # collects it + del data_tensor + f.seek(0) + obj = torch.load(f, weights_only=False) + else: + buffer = io.BytesIO(data_tensor.numpy()) + obj = torch.load(buffer, weights_only=False) + return obj + + +def all_gather_tensor(tensor: torch.Tensor, world_size=None): + if world_size is None: + world_size = get_world_size() + # make contiguous because NCCL won't gather the tensor otherwise + assert tensor.is_contiguous(), f"{tensor.shape} is not contiguous!" 
+ tensor, orig_device = convert_to_distributed_tensor(tensor) + tensor_all = [torch.ones_like(tensor) for _ in range(world_size)] + dist.all_gather(tensor_all, tensor, async_op=False) # performance opt + tensor_all = [ + convert_to_normal_tensor(tensor, orig_device) for tensor in tensor_all + ] + return tensor_all + + +def all_gather_batch(tensors: List[torch.Tensor]): + """ + Performs all_gather operation on the provided tensors. + """ + # Queue the gathered tensors + world_size = get_world_size() + # There is no need for reduction in the single-proc case + if world_size == 1: + return tensors + tensor_list = [] + output_tensor = [] + for tensor in tensors: + tensor_all = all_gather_tensor(tensor, world_size) + tensor_list.append(tensor_all) + + for tensor_all in tensor_list: + output_tensor.append(torch.cat(tensor_all, dim=0)) + return output_tensor + + +class GatherLayer(autograd.Function): + """ + Gather tensors from all workers with support for backward propagation: + This implementation does not cut the gradients as torch.distributed.all_gather does. + """ + + @staticmethod + def forward(ctx, x): + output = [torch.zeros_like(x) for _ in range(dist.get_world_size())] + dist.all_gather(output, x) + return tuple(output) + + @staticmethod + def backward(ctx, *grads): + all_gradients = torch.stack(grads) + dist.all_reduce(all_gradients) + return all_gradients[dist.get_rank()] + + +def all_gather_batch_with_grad(tensors): + """ + Performs all_gather operation on the provided tensors. + Graph remains connected for backward grad computation. + """ + # Queue the gathered tensors + world_size = get_world_size() + # There is no need for reduction in the single-proc case + if world_size == 1: + return tensors + tensor_list = [] + output_tensor = [] + + for tensor in tensors: + tensor_all = GatherLayer.apply(tensor) + tensor_list.append(tensor_all) + + for tensor_all in tensor_list: + output_tensor.append(torch.cat(tensor_all, dim=0)) + return output_tensor + + +def unwrap_ddp_if_wrapped(model): + if isinstance(model, torch.nn.parallel.DistributedDataParallel): + return model.module + return model + + +def create_new_process_group(group_size): + """ + Creates process groups of a gives `group_size` and returns + process group that current GPU participates in. + + `group_size` must divide the total number of GPUs (world_size). + + Modified from + https://github.com/NVIDIA/apex/blob/4e1ae43f7f7ac69113ef426dd15f37123f0a2ed3/apex/parallel/__init__.py#L60 + + Args: + group_size (int): number of GPU's to collaborate for sync bn + """ + + assert group_size > 0 + + world_size = torch.distributed.get_world_size() + if world_size <= 8: + if group_size > world_size: + logging.warning( + f"Requested group size [{group_size}] > world size [{world_size}]. " + "Assuming local debug run and capping it to world size." 
+ ) + group_size = world_size + assert world_size >= group_size + assert world_size % group_size == 0 + + group = None + for group_num in range(world_size // group_size): + group_ids = range(group_num * group_size, (group_num + 1) * group_size) + cur_group = torch.distributed.new_group(ranks=group_ids) + if torch.distributed.get_rank() // group_size == group_num: + group = cur_group + # can not drop out and return here, every process must go through creation of all subgroups + + assert group is not None + return group + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def gather_to_rank_0_via_filesys(data, filesys_save_dir=None): + """ + Gather any picklable data to rank 0 via filesystem, using all_gather_via_filesys. + """ + return all_gather_via_filesys(data, filesys_save_dir, gather_to_rank_0_only=True) diff --git a/detect_tools/sam3/sam3/train/utils/logger.py b/detect_tools/sam3/sam3/train/utils/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..127f6c8c93d05be6a8654f39a0c428d0d911bf13 --- /dev/null +++ b/detect_tools/sam3/sam3/train/utils/logger.py @@ -0,0 +1,241 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import atexit +import functools +import logging +import sys +import uuid +from typing import Any, Dict, Optional, Union + +from hydra.utils import instantiate + +from iopath.common.file_io import g_pathmgr +from numpy import ndarray + +from sam3.train.utils.train_utils import get_machine_local_and_dist_rank, makedir +from torch import Tensor +from torch.utils.tensorboard import SummaryWriter + +Scalar = Union[Tensor, ndarray, int, float] + + +def make_tensorboard_logger(log_dir: str, **writer_kwargs: Any): + makedir(log_dir) + summary_writer_method = SummaryWriter + return TensorBoardLogger( + path=log_dir, summary_writer_method=summary_writer_method, **writer_kwargs + ) + + +class TensorBoardWriterWrapper: + """ + A wrapper around a SummaryWriter object. + """ + + def __init__( + self, + path: str, + *args: Any, + filename_suffix: str = None, + summary_writer_method: Any = SummaryWriter, + **kwargs: Any, + ) -> None: + """Create a new TensorBoard logger. + On construction, the logger creates a new events file that logs + will be written to. If the environment variable `RANK` is defined, + logger will only log if RANK = 0. + + NOTE: If using the logger with distributed training: + - This logger can call collective operations + - Logs will be written on rank 0 only + - Logger must be constructed synchronously *after* initializing distributed process group. + + Args: + path (str): path to write logs to + *args, **kwargs: Extra arguments to pass to SummaryWriter + """ + self._writer: Optional[SummaryWriter] = None + _, self._rank = get_machine_local_and_dist_rank() + self._path: str = path + if self._rank == 0: + logging.info( + f"TensorBoard SummaryWriter instantiated. 
Files will be stored in: {path}" + ) + self._writer = summary_writer_method( + log_dir=path, + *args, + filename_suffix=filename_suffix or str(uuid.uuid4()), + **kwargs, + ) + else: + logging.debug( + f"Not logging meters on this host because env RANK: {self._rank} != 0" + ) + atexit.register(self.close) + + @property + def writer(self) -> Optional[SummaryWriter]: + return self._writer + + @property + def path(self) -> str: + return self._path + + def flush(self) -> None: + """Writes pending logs to disk.""" + + if not self._writer: + return + + self._writer.flush() + + def close(self) -> None: + """Close writer, flushing pending logs to disk. + Logs cannot be written after `close` is called. + """ + + if not self._writer: + return + + self._writer.close() + self._writer = None + + +class TensorBoardLogger(TensorBoardWriterWrapper): + """ + A simple logger for TensorBoard. + """ + + def log_dict(self, payload: Dict[str, Scalar], step: int) -> None: + """Add multiple scalar values to TensorBoard. + + Args: + payload (dict): dictionary of tag name and scalar value + step (int, Optional): step value to record + """ + if not self._writer: + return + for k, v in payload.items(): + self.log(k, v, step) + + def log(self, name: str, data: Scalar, step: int) -> None: + """Add scalar data to TensorBoard. + + Args: + name (string): tag name used to group scalars + data (float/int/Tensor): scalar data to log + step (int, optional): step value to record + """ + if not self._writer: + return + self._writer.add_scalar(name, data, global_step=step, new_style=True) + + def log_hparams( + self, hparams: Dict[str, Scalar], meters: Dict[str, Scalar] + ) -> None: + """Add hyperparameter data to TensorBoard. + + Args: + hparams (dict): dictionary of hyperparameter names and corresponding values + meters (dict): dictionary of name of meter and corersponding values + """ + if not self._writer: + return + self._writer.add_hparams(hparams, meters) + + +class Logger: + """ + A logger class that can interface with multiple loggers. It now supports tensorboard only for simplicity, but you can extend it with your own logger. + """ + + def __init__(self, logging_conf): + # allow turning off TensorBoard with "should_log: false" in config + tb_config = logging_conf.tensorboard_writer + tb_should_log = tb_config and tb_config.pop("should_log", True) + self.tb_logger = instantiate(tb_config) if tb_should_log else None + + def log_dict(self, payload: Dict[str, Scalar], step: int) -> None: + if self.tb_logger: + self.tb_logger.log_dict(payload, step) + + def log(self, name: str, data: Scalar, step: int) -> None: + if self.tb_logger: + self.tb_logger.log(name, data, step) + + def log_hparams( + self, hparams: Dict[str, Scalar], meters: Dict[str, Scalar] + ) -> None: + if self.tb_logger: + self.tb_logger.log_hparams(hparams, meters) + + +# cache the opened file object, so that different calls to `setup_logger` +# with the same file name can safely write to the same file. +@functools.lru_cache(maxsize=None) +def _cached_log_stream(filename): + # we tune the buffering value so that the logs are updated + # frequently. + log_buffer_kb = 10 * 1024 # 10KB + io = g_pathmgr.open(filename, mode="a", buffering=log_buffer_kb) + atexit.register(io.close) + return io + + +def setup_logging( + name, + output_dir=None, + rank=0, + log_level_primary="INFO", + log_level_secondary="ERROR", +): + """ + Setup various logging streams: stdout and file handlers. + For file handlers, we only setup for the master gpu. 
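+    The console handler logs at `log_level_primary` on rank 0 and at
+    `log_level_secondary` on other ranks; the file handler is attached on rank 0 only.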
+ """ + # get the filename if we want to log to the file as well + log_filename = None + if output_dir: + makedir(output_dir) + if rank == 0: + log_filename = f"{output_dir}/log.txt" + + logger = logging.getLogger(name) + logger.setLevel(log_level_primary) + + # create formatter + FORMAT = "%(levelname)s %(asctime)s %(filename)s:%(lineno)4d: %(message)s" + formatter = logging.Formatter(FORMAT) + + # Cleanup any existing handlers + for h in logger.handlers: + logger.removeHandler(h) + logger.root.handlers = [] + + # setup the console handler + console_handler = logging.StreamHandler(sys.stdout) + console_handler.setFormatter(formatter) + logger.addHandler(console_handler) + if rank == 0: + console_handler.setLevel(log_level_primary) + else: + console_handler.setLevel(log_level_secondary) + + # we log to file as well if user wants + if log_filename and rank == 0: + file_handler = logging.StreamHandler(_cached_log_stream(log_filename)) + file_handler.setLevel(log_level_primary) + file_handler.setFormatter(formatter) + logger.addHandler(file_handler) + + logging.root = logger + + +def shutdown_logging(): + """ + After training is done, we ensure to shut down all the logger streams. + """ + logging.info("Shutting down loggers...") + handlers = logging.root.handlers + for handler in handlers: + handler.close() diff --git a/detect_tools/sam3/sam3/train/utils/train_utils.py b/detect_tools/sam3/sam3/train/utils/train_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..43a80782760a3ebb81878f985e4ac5804ba14f21 --- /dev/null +++ b/detect_tools/sam3/sam3/train/utils/train_utils.py @@ -0,0 +1,285 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +import logging +import math +import os +import random +import re +from datetime import timedelta +from typing import Optional + +import hydra + +import numpy as np +import omegaconf +import torch +import torch.distributed as dist +from iopath.common.file_io import g_pathmgr +from omegaconf import OmegaConf + + +def multiply_all(*args): + return np.prod(np.array(args)).item() + + +def collect_dict_keys(config): + """This function recursively iterates through a dataset configuration, and collect all the dict_key that are defined""" + val_keys = [] + # If the this config points to the collate function, then it has a key + if "_target_" in config and re.match(r".*collate_fn.*", config["_target_"]): + val_keys.append(config["dict_key"]) + else: + # Recursively proceed + for v in config.values(): + if isinstance(v, type(config)): + val_keys.extend(collect_dict_keys(v)) + elif isinstance(v, omegaconf.listconfig.ListConfig): + for item in v: + if isinstance(item, type(config)): + val_keys.extend(collect_dict_keys(item)) + return val_keys + + +class Phase: + TRAIN = "train" + VAL = "val" + + +def register_omegaconf_resolvers(): + OmegaConf.register_new_resolver("get_method", hydra.utils.get_method) + OmegaConf.register_new_resolver("get_class", hydra.utils.get_class) + OmegaConf.register_new_resolver("add", lambda x, y: x + y) + OmegaConf.register_new_resolver("times", multiply_all) + OmegaConf.register_new_resolver("divide", lambda x, y: x / y) + OmegaConf.register_new_resolver("pow", lambda x, y: x**y) + OmegaConf.register_new_resolver("subtract", lambda x, y: x - y) + OmegaConf.register_new_resolver("range", lambda x: list(range(x))) + OmegaConf.register_new_resolver("int", lambda x: int(x)) + OmegaConf.register_new_resolver("ceil_int", lambda x: int(math.ceil(x))) + OmegaConf.register_new_resolver("merge", 
lambda *x: OmegaConf.merge(*x)) + OmegaConf.register_new_resolver("string", lambda x: str(x)) + + +def setup_distributed_backend(backend, timeout_mins): + """ + Initialize torch.distributed and set the CUDA device. + Expects environment variables to be set as per + https://pytorch.org/docs/stable/distributed.html#environment-variable-initialization + along with the environment variable "LOCAL_RANK", which is used to set the CUDA device. + """ + # enable TORCH_NCCL_ASYNC_ERROR_HANDLING to ensure dist nccl ops time out after timeout_mins + # of waiting + os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "1" + logging.info(f"Setting up torch.distributed with a timeout of {timeout_mins} mins") + dist.init_process_group(backend=backend, timeout=timedelta(minutes=timeout_mins)) + return dist.get_rank() + + +def get_machine_local_and_dist_rank(): + """ + Get the distributed and local rank of the current gpu. + """ + local_rank = int(os.environ.get("LOCAL_RANK", None)) + distributed_rank = int(os.environ.get("RANK", None)) + assert ( + local_rank is not None and distributed_rank is not None + ), "Please set the RANK and LOCAL_RANK environment variables." + return local_rank, distributed_rank + + +def print_cfg(cfg): + """ + Supports printing both Hydra DictConfig and AttrDict configs + """ + logging.info("Training with config:") + logging.info(OmegaConf.to_yaml(cfg)) + + +def set_seeds(seed_value, max_epochs, dist_rank): + """ + Set the python random, numpy and torch seeds for each gpu. Also set the CUDA + seeds if CUDA is available. This ensures the deterministic nature of the training. + """ + # Since in the pytorch sampler, we increment the seed by 1 for every epoch. + seed_value = (seed_value + dist_rank) * max_epochs + logging.info(f"MACHINE SEED: {seed_value}") + random.seed(seed_value) + np.random.seed(seed_value) + torch.manual_seed(seed_value) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed_value) + + +def makedir(dir_path): + """ + Create the directory if it does not exist. + """ + is_success = False + try: + if not g_pathmgr.exists(dir_path): + g_pathmgr.mkdirs(dir_path) + is_success = True + except BaseException: + logging.info(f"Error creating directory: {dir_path}") + return is_success + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_amp_type(amp_type: Optional[str] = None): + if amp_type is None: + return None + assert amp_type in ["bfloat16", "float16"], "Invalid Amp type."
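The helpers above (distributed init, rank lookup, per-rank seeding, AMP dtype selection) are typically wired together at the start of a training entry point. A minimal sketch, assuming a torchrun-style launch that already sets RANK, LOCAL_RANK, MASTER_ADDR and MASTER_PORT; everything other than the train_utils helpers is illustrative:

```python
# Illustrative wiring of the train_utils helpers above (not part of the diff).
# Assumes a torchrun-style launch, so RANK / LOCAL_RANK / MASTER_ADDR / MASTER_PORT
# are already present in the environment.
import torch


def example_train_entry(seed: int = 0, max_epochs: int = 10):
    setup_distributed_backend(backend="nccl", timeout_mins=30)
    local_rank, dist_rank = get_machine_local_and_dist_rank()
    torch.cuda.set_device(local_rank)

    # Each rank gets a different seed, as implemented in set_seeds above.
    set_seeds(seed, max_epochs=max_epochs, dist_rank=dist_rank)

    # "bfloat16"/"float16" map to torch dtypes; None disables mixed precision.
    amp_dtype = get_amp_type("bfloat16")
    with torch.autocast(device_type="cuda", dtype=amp_dtype, enabled=amp_dtype is not None):
        pass  # forward / backward passes would go here
```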
+ if amp_type == "bfloat16": + return torch.bfloat16 + else: + return torch.float16 + + +def log_env_variables(): + env_keys = sorted(list(os.environ.keys())) + st = "" + for k in env_keys: + v = os.environ[k] + st += f"{k}={v}\n" + logging.info("Logging ENV_VARIABLES") + logging.info(st) + + +class AverageMeter: + """Computes and stores the average and current value""" + + def __init__(self, name, device, fmt=":f"): + self.name = name + self.fmt = fmt + self.device = device + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + self._allow_updates = True + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + def __str__(self): + fmtstr = "{name}: {val" + self.fmt + "} ({avg" + self.fmt + "})" + return fmtstr.format(**self.__dict__) + + +class MemMeter: + """Computes and stores the current, avg, and max of peak Mem usage per iteration""" + + def __init__(self, name, device, fmt=":f"): + self.name = name + self.fmt = fmt + self.device = device + self.reset() + + def reset(self): + self.val = 0 # Per iteration max usage + self.avg = 0 # Avg per iteration max usage + self.peak = 0 # Peak usage for lifetime of program + self.sum = 0 + self.count = 0 + self._allow_updates = True + + def update(self, n=1, reset_peak_usage=True): + self.val = torch.cuda.max_memory_allocated() // 1e9 + self.sum += self.val * n + self.count += n + self.avg = self.sum / self.count + self.peak = max(self.peak, self.val) + if reset_peak_usage: + torch.cuda.reset_peak_memory_stats() + + def __str__(self): + fmtstr = ( + "{name}: {val" + + self.fmt + + "} ({avg" + + self.fmt + + "}/{peak" + + self.fmt + + "})" + ) + return fmtstr.format(**self.__dict__) + + +def human_readable_time(time_seconds): + time = int(time_seconds) + minutes, seconds = divmod(time, 60) + hours, minutes = divmod(minutes, 60) + days, hours = divmod(hours, 24) + return f"{days:02}d {hours:02}h {minutes:02}m" + + +class DurationMeter: + def __init__(self, name, device, fmt=":f"): + self.name = name + self.device = device + self.fmt = fmt + self.val = 0 + + def reset(self): + self.val = 0 + + def update(self, val): + self.val = val + + def add(self, val): + self.val += val + + def __str__(self): + return f"{self.name}: {human_readable_time(self.val)}" + + +class ProgressMeter: + def __init__(self, num_batches, meters, real_meters, prefix=""): + self.batch_fmtstr = self._get_batch_fmtstr(num_batches) + self.meters = meters + self.real_meters = real_meters + self.prefix = prefix + + def display(self, batch, enable_print=False): + entries = [self.prefix + self.batch_fmtstr.format(batch)] + entries += [str(meter) for meter in self.meters] + entries += [ + " | ".join( + [ + f"{os.path.join(name, subname)}: {val:.4f}" + for subname, val in meter.compute().items() + ] + ) + for name, meter in self.real_meters.items() + ] + logging.info(" | ".join(entries)) + if enable_print: + print(" | ".join(entries)) + + def _get_batch_fmtstr(self, num_batches): + num_digits = len(str(num_batches // 1)) + fmt = "{:" + str(num_digits) + "d}" + return "[" + fmt + "/" + fmt.format(num_batches) + "]" + + +def get_resume_checkpoint(checkpoint_save_dir): + if not g_pathmgr.isdir(checkpoint_save_dir): + return None + ckpt_file = os.path.join(checkpoint_save_dir, "checkpoint.pt") + if not g_pathmgr.isfile(ckpt_file): + return None + + return ckpt_file diff --git a/detect_tools/sam3/sam3/visualization_utils.py b/detect_tools/sam3/sam3/visualization_utils.py new 
file mode 100644 index 0000000000000000000000000000000000000000..090f0860768b020942920f8efcc6bffa808e63a5 --- /dev/null +++ b/detect_tools/sam3/sam3/visualization_utils.py @@ -0,0 +1,941 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved +import json +import os +import subprocess +from pathlib import Path + +import cv2 +import matplotlib.patches as patches +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import pycocotools.mask as mask_utils +import torch +from matplotlib.colors import to_rgb +from PIL import Image +from skimage.color import lab2rgb, rgb2lab +from sklearn.cluster import KMeans +from torchvision.ops import masks_to_boxes +from tqdm import tqdm + + +def generate_colors(n_colors=256, n_samples=5000): + # Step 1: Random RGB samples + np.random.seed(42) + rgb = np.random.rand(n_samples, 3) + # Step 2: Convert to LAB for perceptual uniformity + # print(f"Converting {n_samples} RGB samples to LAB color space...") + lab = rgb2lab(rgb.reshape(1, -1, 3)).reshape(-1, 3) + # print("Conversion to LAB complete.") + # Step 3: k-means clustering in LAB + kmeans = KMeans(n_clusters=n_colors, n_init=10) + # print(f"Fitting KMeans with {n_colors} clusters on {n_samples} samples...") + kmeans.fit(lab) + # print("KMeans fitting complete.") + centers_lab = kmeans.cluster_centers_ + # Step 4: Convert LAB back to RGB + colors_rgb = lab2rgb(centers_lab.reshape(1, -1, 3)).reshape(-1, 3) + colors_rgb = np.clip(colors_rgb, 0, 1) + return colors_rgb + + +COLORS = generate_colors(n_colors=128, n_samples=5000) + + +def show_img_tensor(img_batch, vis_img_idx=0): + MEAN_IMG = np.array([0.485, 0.456, 0.406]) + STD_IMG = np.array([0.229, 0.224, 0.225]) + im_tensor = img_batch[vis_img_idx].detach().cpu() + assert im_tensor.dim() == 3 + im_tensor = im_tensor.numpy().transpose((1, 2, 0)) + im_tensor = (im_tensor * STD_IMG) + MEAN_IMG + im_tensor = np.clip(im_tensor, 0, 1) + plt.imshow(im_tensor) + + +def draw_box_on_image(image, box, color=(0, 255, 0)): + """ + Draws a rectangle on a given PIL image using the provided box coordinates in xywh format. + :param image: PIL.Image - The image on which to draw the rectangle. + :param box: tuple - A tuple (x, y, w, h) representing the top-left corner, width, and height of the rectangle. + :param color: tuple - A tuple (R, G, B) representing the color of the rectangle. Default is green. + :return: PIL.Image - The image with the rectangle drawn on it.
+ """ + # Ensure the image is in RGB mode + image = image.convert("RGB") + # Unpack the box coordinates + x, y, w, h = box + x, y, w, h = int(x), int(y), int(w), int(h) + # Get the pixel data + pixels = image.load() + # Draw the top and bottom edges + for i in range(x, x + w): + pixels[i, y] = color + pixels[i, y + h - 1] = color + pixels[i, y + 1] = color + pixels[i, y + h] = color + pixels[i, y - 1] = color + pixels[i, y + h - 2] = color + # Draw the left and right edges + for j in range(y, y + h): + pixels[x, j] = color + pixels[x + 1, j] = color + pixels[x - 1, j] = color + pixels[x + w - 1, j] = color + pixels[x + w, j] = color + pixels[x + w - 2, j] = color + return image + + +def plot_bbox( + img_height, + img_width, + box, + box_format="XYXY", + relative_coords=True, + color="r", + linestyle="solid", + text=None, + ax=None, +): + if box_format == "XYXY": + x, y, x2, y2 = box + w = x2 - x + h = y2 - y + elif box_format == "XYWH": + x, y, w, h = box + elif box_format == "CxCyWH": + cx, cy, w, h = box + x = cx - w / 2 + y = cy - h / 2 + else: + raise RuntimeError(f"Invalid box_format {box_format}") + + if relative_coords: + x *= img_width + w *= img_width + y *= img_height + h *= img_height + + if ax is None: + ax = plt.gca() + rect = patches.Rectangle( + (x, y), + w, + h, + linewidth=1.5, + edgecolor=color, + facecolor="none", + linestyle=linestyle, + ) + ax.add_patch(rect) + if text is not None: + facecolor = "w" + ax.text( + x, + y - 5, + text, + color=color, + weight="bold", + fontsize=8, + bbox={"facecolor": facecolor, "alpha": 0.75, "pad": 2}, + ) + + +def plot_mask(mask, color="r", ax=None): + im_h, im_w = mask.shape + mask_img = np.zeros((im_h, im_w, 4), dtype=np.float32) + mask_img[..., :3] = to_rgb(color) + mask_img[..., 3] = mask * 0.5 + # Use the provided ax or the current axis + if ax is None: + ax = plt.gca() + ax.imshow(mask_img) + + +def normalize_bbox(bbox_xywh, img_w, img_h): + # Assumes bbox_xywh is in XYWH format + if isinstance(bbox_xywh, list): + assert ( + len(bbox_xywh) == 4 + ), "bbox_xywh list must have 4 elements. Batching not support except for torch tensors." + normalized_bbox = bbox_xywh.copy() + normalized_bbox[0] /= img_w + normalized_bbox[1] /= img_h + normalized_bbox[2] /= img_w + normalized_bbox[3] /= img_h + else: + assert isinstance( + bbox_xywh, torch.Tensor + ), "Only torch tensors are supported for batching." + normalized_bbox = bbox_xywh.clone() + assert ( + normalized_bbox.size(-1) == 4 + ), "bbox_xywh tensor must have last dimension of size 4." 
+ normalized_bbox[..., 0] /= img_w + normalized_bbox[..., 1] /= img_h + normalized_bbox[..., 2] /= img_w + normalized_bbox[..., 3] /= img_h + return normalized_bbox + + +def visualize_frame_output(frame_idx, video_frames, outputs, figsize=(12, 8)): + plt.figure(figsize=figsize) + plt.title(f"frame {frame_idx}") + img = load_frame(video_frames[frame_idx]) + img_H, img_W, _ = img.shape + plt.imshow(img) + for i in range(len(outputs["out_probs"])): + box_xywh = outputs["out_boxes_xywh"][i] + prob = outputs["out_probs"][i] + obj_id = outputs["out_obj_ids"][i] + binary_mask = outputs["out_binary_masks"][i] + color = COLORS[obj_id % len(COLORS)] + plot_bbox( + img_H, + img_W, + box_xywh, + text=f"(id={obj_id}, {prob=:.2f})", + box_format="XYWH", + color=color, + ) + plot_mask(binary_mask, color=color) + + +def visualize_formatted_frame_output( + frame_idx, + video_frames, + outputs_list, + titles=None, + points_list=None, + points_labels_list=None, + figsize=(12, 8), + title_suffix="", + prompt_info=None, +): + """Visualize one or more sets of segmentation masks on a video frame. + + Args: + frame_idx: Frame index to visualize + video_frames: List of video frames (file paths, arrays, or PIL images) + outputs_list: List of {frame_idx: {obj_id: mask_tensor}} or single dict {obj_id: mask_tensor} + titles: List of titles for each set of outputs_list + points_list: Optional list of point coordinates + points_labels_list: Optional list of point labels + figsize: Figure size tuple + title_suffix: Additional title suffix + prompt_info: Dictionary with prompt information (boxes, points, etc.) + """ + # Handle single output dict case + if isinstance(outputs_list, dict) and frame_idx in outputs_list: + # This is a single outputs dict with frame indices as keys + outputs_list = [outputs_list] + elif isinstance(outputs_list, dict) and not any( + isinstance(k, int) for k in outputs_list.keys() + ): + # This is a single frame's outputs {obj_id: mask} + single_frame_outputs = {frame_idx: outputs_list} + outputs_list = [single_frame_outputs] + + num_outputs = len(outputs_list) + if titles is None: + titles = [f"Set {i+1}" for i in range(num_outputs)] + assert ( + len(titles) == num_outputs + ), "length of `titles` should match that of `outputs_list` if not None."
+ + _, axes = plt.subplots(1, num_outputs, figsize=figsize) + if num_outputs == 1: + axes = [axes] # Make it iterable + + img = load_frame(video_frames[frame_idx]) + img_H, img_W, _ = img.shape + + for idx in range(num_outputs): + ax, outputs_set, ax_title = axes[idx], outputs_list[idx], titles[idx] + ax.set_title(f"Frame {frame_idx} - {ax_title}{title_suffix}") + ax.imshow(img) + + if frame_idx in outputs_set: + _outputs = outputs_set[frame_idx] + else: + print(f"Warning: Frame {frame_idx} not found in outputs_set") + continue + + if prompt_info and frame_idx == 0: # Show prompts on first frame + if "boxes" in prompt_info: + for box in prompt_info["boxes"]: + # box is in [x, y, w, h] normalized format + x, y, w, h = box + plot_bbox( + img_H, + img_W, + [x, y, x + w, y + h], # Convert to XYXY + box_format="XYXY", + relative_coords=True, + color="yellow", + linestyle="dashed", + text="PROMPT BOX", + ax=ax, + ) + + if "points" in prompt_info and "point_labels" in prompt_info: + points = np.array(prompt_info["points"]) + labels = np.array(prompt_info["point_labels"]) + # Convert normalized to pixel coordinates + points_pixel = points * np.array([img_W, img_H]) + + # Draw positive points (green stars) + pos_points = points_pixel[labels == 1] + if len(pos_points) > 0: + ax.scatter( + pos_points[:, 0], + pos_points[:, 1], + color="lime", + marker="*", + s=200, + edgecolor="white", + linewidth=2, + label="Positive Points", + zorder=10, + ) + + # Draw negative points (red stars) + neg_points = points_pixel[labels == 0] + if len(neg_points) > 0: + ax.scatter( + neg_points[:, 0], + neg_points[:, 1], + color="red", + marker="*", + s=200, + edgecolor="white", + linewidth=2, + label="Negative Points", + zorder=10, + ) + + objects_drawn = 0 + for obj_id, binary_mask in _outputs.items(): + mask_sum = ( + binary_mask.sum() + if hasattr(binary_mask, "sum") + else np.sum(binary_mask) + ) + + if mask_sum > 0: # Only draw if mask has content + # Convert to torch tensor if it's not already + if not isinstance(binary_mask, torch.Tensor): + binary_mask = torch.tensor(binary_mask) + + # Find bounding box from mask + if binary_mask.any(): + box_xyxy = masks_to_boxes(binary_mask.unsqueeze(0)).squeeze() + box_xyxy = normalize_bbox(box_xyxy, img_W, img_H) + else: + # Fallback: create a small box at center + box_xyxy = [0.45, 0.45, 0.55, 0.55] + + color = COLORS[obj_id % len(COLORS)] + + plot_bbox( + img_H, + img_W, + box_xyxy, + text=f"(id={obj_id})", + box_format="XYXY", + color=color, + ax=ax, + ) + + # Convert back to numpy for plotting + mask_np = ( + binary_mask.numpy() + if isinstance(binary_mask, torch.Tensor) + else binary_mask + ) + plot_mask(mask_np, color=color, ax=ax) + objects_drawn += 1 + + if objects_drawn == 0: + ax.text( + 0.5, + 0.5, + "No objects detected", + transform=ax.transAxes, + fontsize=16, + ha="center", + va="center", + color="red", + weight="bold", + ) + + # Draw additional points if provided + if points_list is not None and points_list[idx] is not None: + show_points( + points_list[idx], points_labels_list[idx], ax=ax, marker_size=200 + ) + + ax.axis("off") + + plt.tight_layout() + plt.show() + + +def render_masklet_frame(img, outputs, frame_idx=None, alpha=0.5): + """ + Overlays masklets and bounding boxes on a single image frame. 
+ Args: + img: np.ndarray, shape (H, W, 3), uint8 or float32 in [0,255] or [0,1] + outputs: dict with keys: out_boxes_xywh, out_probs, out_obj_ids, out_binary_masks + frame_idx: int or None, for overlaying frame index text + alpha: float, mask overlay alpha + Returns: + overlay: np.ndarray, shape (H, W, 3), uint8 + """ + if img.dtype == np.float32 or img.max() <= 1.0: + img = (img * 255).astype(np.uint8) + img = img[..., :3] # drop alpha if present + height, width = img.shape[:2] + overlay = img.copy() + + for i in range(len(outputs["out_probs"])): + obj_id = outputs["out_obj_ids"][i] + color = COLORS[obj_id % len(COLORS)] + color255 = (color * 255).astype(np.uint8) + mask = outputs["out_binary_masks"][i] + if mask.shape != img.shape[:2]: + mask = cv2.resize( + mask.astype(np.float32), + (img.shape[1], img.shape[0]), + interpolation=cv2.INTER_NEAREST, + ) + mask_bool = mask > 0.5 + for c in range(3): + overlay[..., c][mask_bool] = ( + alpha * color255[c] + (1 - alpha) * overlay[..., c][mask_bool] + ).astype(np.uint8) + + # Draw bounding boxes and text + for i in range(len(outputs["out_probs"])): + box_xywh = outputs["out_boxes_xywh"][i] + obj_id = outputs["out_obj_ids"][i] + prob = outputs["out_probs"][i] + color = COLORS[obj_id % len(COLORS)] + color255 = tuple(int(x * 255) for x in color) + x, y, w, h = box_xywh + x1 = int(x * width) + y1 = int(y * height) + x2 = int((x + w) * width) + y2 = int((y + h) * height) + cv2.rectangle(overlay, (x1, y1), (x2, y2), color255, 2) + if prob is not None: + label = f"id={obj_id}, p={prob:.2f}" + else: + label = f"id={obj_id}" + cv2.putText( + overlay, + label, + (x1, max(y1 - 10, 0)), + cv2.FONT_HERSHEY_SIMPLEX, + 0.5, + color255, + 1, + cv2.LINE_AA, + ) + + # Overlay frame index at the top-left corner + if frame_idx is not None: + cv2.putText( + overlay, + f"Frame {frame_idx}", + (10, 30), + cv2.FONT_HERSHEY_SIMPLEX, + 1.0, + (255, 255, 255), + 2, + cv2.LINE_AA, + ) + + return overlay + + +def save_masklet_video(video_frames, outputs, out_path, alpha=0.5, fps=10): + # Each outputs dict has keys: "out_boxes_xywh", "out_probs", "out_obj_ids", "out_binary_masks" + # video_frames: list of video frame data, same length as outputs_list + + # Read first frame to get size + first_img = load_frame(video_frames[0]) + height, width = first_img.shape[:2] + if first_img.dtype == np.float32 or first_img.max() <= 1.0: + first_img = (first_img * 255).astype(np.uint8) + # Use 'mp4v' for best compatibility with VSCode playback (.mp4 files) + fourcc = cv2.VideoWriter_fourcc(*"mp4v") + writer = cv2.VideoWriter("temp.mp4", fourcc, fps, (width, height)) + + outputs_list = [ + (video_frames[frame_idx], frame_idx, outputs[frame_idx]) + for frame_idx in sorted(outputs.keys()) + ] + + for frame, frame_idx, frame_outputs in tqdm(outputs_list): + img = load_frame(frame) + overlay = render_masklet_frame( + img, frame_outputs, frame_idx=frame_idx, alpha=alpha + ) + writer.write(cv2.cvtColor(overlay, cv2.COLOR_RGB2BGR)) + + writer.release() + + # Re-encode the video for VSCode compatibility using ffmpeg + subprocess.run(["ffmpeg", "-y", "-i", "temp.mp4", out_path]) + print(f"Re-encoded video saved to {out_path}") + + os.remove("temp.mp4") # Clean up temporary file + + +def save_masklet_image(frame, outputs, out_path, alpha=0.5, frame_idx=None): + """ + Save a single image with masklet overlays. 
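The rendering helpers here all consume the same per-frame outputs dict (relative XYWH boxes, per-object scores and ids, and full-resolution binary masks). A toy example of that structure, with made-up values, just to show the expected keys and shapes:

```python
# Toy per-frame outputs dict in the format render_masklet_frame expects
# (values are made up; boxes are relative XYWH in [0, 1]).
import numpy as np

H, W = 240, 320
mask = np.zeros((H, W), dtype=np.uint8)
mask[60:120, 80:160] = 1

frame = np.full((H, W, 3), 127, dtype=np.uint8)
outputs = {
    "out_boxes_xywh": [[80 / W, 60 / H, 80 / W, 60 / H]],
    "out_probs": [0.93],
    "out_obj_ids": [0],
    "out_binary_masks": [mask],
}
overlay = render_masklet_frame(frame, outputs, frame_idx=0, alpha=0.5)  # (H, W, 3) uint8
```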
+ """ + img = load_frame(frame) + overlay = render_masklet_frame(img, outputs, frame_idx=frame_idx, alpha=alpha) + Image.fromarray(overlay).save(out_path) + print(f"Overlay image saved to {out_path}") + + +def prepare_masks_for_visualization(frame_to_output): + # frame_to_obj_masks --> {frame_idx: {'output_probs': np.array, `out_obj_ids`: np.array, `out_binary_masks`: np.array}} + for frame_idx, out in frame_to_output.items(): + _processed_out = {} + for idx, obj_id in enumerate(out["out_obj_ids"].tolist()): + if out["out_binary_masks"][idx].any(): + _processed_out[obj_id] = out["out_binary_masks"][idx] + frame_to_output[frame_idx] = _processed_out + return frame_to_output + + +def convert_coco_to_masklet_format( + annotations, img_info, is_prediction=False, score_threshold=0.5 +): + """ + Convert COCO format annotations to format expected by render_masklet_frame + """ + outputs = { + "out_boxes_xywh": [], + "out_probs": [], + "out_obj_ids": [], + "out_binary_masks": [], + } + + img_h, img_w = img_info["height"], img_info["width"] + + for idx, ann in enumerate(annotations): + # Get bounding box in relative XYWH format + if "bbox" in ann: + bbox = ann["bbox"] + if max(bbox) > 1.0: # Convert absolute to relative coordinates + bbox = [ + bbox[0] / img_w, + bbox[1] / img_h, + bbox[2] / img_w, + bbox[3] / img_h, + ] + else: + mask = mask_utils.decode(ann["segmentation"]) + rows = np.any(mask, axis=1) + cols = np.any(mask, axis=0) + if np.any(rows) and np.any(cols): + rmin, rmax = np.where(rows)[0][[0, -1]] + cmin, cmax = np.where(cols)[0][[0, -1]] + # Convert to relative XYWH + bbox = [ + cmin / img_w, + rmin / img_h, + (cmax - cmin + 1) / img_w, + (rmax - rmin + 1) / img_h, + ] + else: + bbox = [0, 0, 0, 0] + + outputs["out_boxes_xywh"].append(bbox) + + # Get probability/score + if is_prediction: + prob = ann["score"] + else: + prob = 1.0 # GT has no probability + outputs["out_probs"].append(prob) + + outputs["out_obj_ids"].append(idx) + mask = mask_utils.decode(ann["segmentation"]) + mask = (mask > score_threshold).astype(np.uint8) + + outputs["out_binary_masks"].append(mask) + + return outputs + + +def save_side_by_side_visualization(img, gt_anns, pred_anns, noun_phrase): + """ + Create side-by-side visualization of GT and predictions + """ + + # Create side-by-side visualization + fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 7)) + + main_title = f"Noun phrase: '{noun_phrase}'" + fig.suptitle(main_title, fontsize=16, fontweight="bold") + + gt_overlay = render_masklet_frame(img, gt_anns, alpha=0.5) + ax1.imshow(gt_overlay) + ax1.set_title("Ground Truth", fontsize=14, fontweight="bold") + ax1.axis("off") + + pred_overlay = render_masklet_frame(img, pred_anns, alpha=0.5) + ax2.imshow(pred_overlay) + ax2.set_title("Predictions", fontsize=14, fontweight="bold") + ax2.axis("off") + + plt.subplots_adjust(top=0.88) + plt.tight_layout() + + +def bitget(val, idx): + return (val >> idx) & 1 + + +def pascal_color_map(): + colormap = np.zeros((512, 3), dtype=int) + ind = np.arange(512, dtype=int) + for shift in reversed(list(range(8))): + for channel in range(3): + colormap[:, channel] |= bitget(ind, channel) << shift + ind >>= 3 + + return colormap.astype(np.uint8) + + +def draw_masks_to_frame( + frame: np.ndarray, masks: np.ndarray, colors: np.ndarray +) -> np.ndarray: + masked_frame = frame + for mask, color in zip(masks, colors): + curr_masked_frame = np.where(mask[..., None], color, masked_frame) + masked_frame = cv2.addWeighted(masked_frame, 0.75, curr_masked_frame, 0.25, 0) + + if 
int(cv2.__version__[0]) > 3: + contours, _ = cv2.findContours( + np.array(mask, dtype=np.uint8).copy(), + cv2.RETR_TREE, + cv2.CHAIN_APPROX_NONE, + ) + else: + _, contours, _ = cv2.findContours( + np.array(mask, dtype=np.uint8).copy(), + cv2.RETR_TREE, + cv2.CHAIN_APPROX_NONE, + ) + + cv2.drawContours( + masked_frame, contours, -1, (255, 255, 255), 7 + ) # White outer contour + cv2.drawContours( + masked_frame, contours, -1, (0, 0, 0), 5 + ) # Black middle contour + cv2.drawContours( + masked_frame, contours, -1, color.tolist(), 3 + ) # Original color inner contour + return masked_frame + + +def get_annot_df(file_path: str): + with open(file_path, "r") as f: + data = json.load(f) + + dfs = {} + + for k, v in data.items(): + if k in ("info", "licenses"): + dfs[k] = v + continue + df = pd.DataFrame(v) + dfs[k] = df + + return dfs + + +def get_annot_dfs(file_list: list[str]): + dfs = {} + for annot_file in tqdm(file_list): + dataset_name = Path(annot_file).stem + dfs[dataset_name] = get_annot_df(annot_file) + + return dfs + + +def get_media_dir(media_dir: str, dataset: str): + if dataset in ["saco_veval_sav_test", "saco_veval_sav_val"]: + return os.path.join(media_dir, "saco_sav", "JPEGImages_24fps") + elif dataset in ["saco_veval_yt1b_test", "saco_veval_yt1b_val"]: + return os.path.join(media_dir, "saco_yt1b", "JPEGImages_6fps") + elif dataset in ["saco_veval_smartglasses_test", "saco_veval_smartglasses_val"]: + return os.path.join(media_dir, "saco_sg", "JPEGImages_6fps") + elif dataset == "sa_fari_test": + return os.path.join(media_dir, "sa_fari", "JPEGImages_6fps") + else: + raise ValueError(f"Dataset {dataset} not found") + + +def get_all_annotations_for_frame( + dataset_df: pd.DataFrame, video_id: int, frame_idx: int, data_dir: str, dataset: str +): + media_dir = os.path.join(data_dir, "media") + + # Load the annotation and video data + annot_df = dataset_df["annotations"] + video_df = dataset_df["videos"] + + # Get the frame + video_df_current = video_df[video_df.id == video_id] + assert ( + len(video_df_current) == 1 + ), f"Expected 1 video row, got {len(video_df_current)}" + video_row = video_df_current.iloc[0] + file_name = video_row.file_names[frame_idx] + file_path = os.path.join( + get_media_dir(media_dir=media_dir, dataset=dataset), file_name + ) + frame = cv2.imread(file_path) + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + + # Get the masks and noun phrases annotated in this video in this frame + annot_df_current_video = annot_df[annot_df.video_id == video_id] + if len(annot_df_current_video) == 0: + print(f"No annotations found for video_id {video_id}") + return frame, None, None + else: + empty_mask = np.zeros(frame.shape[:2], dtype=np.uint8) + mask_np_pairs = annot_df_current_video.apply( + lambda row: ( + ( + mask_utils.decode(row.segmentations[frame_idx]) + if row.segmentations[frame_idx] + else empty_mask + ), + row.noun_phrase, + ), + axis=1, + ) + # sort based on noun_phrases + mask_np_pairs = sorted(mask_np_pairs, key=lambda x: x[1]) + masks, noun_phrases = zip(*mask_np_pairs) + + return frame, masks, noun_phrases + + +def visualize_prompt_overlay( + frame_idx, + video_frames, + title="Prompt Visualization", + text_prompt=None, + point_prompts=None, + point_labels=None, + bounding_boxes=None, + box_labels=None, + obj_id=None, +): + """Simple prompt visualization function""" + img = Image.fromarray(load_frame(video_frames[frame_idx])) + fig, ax = plt.subplots(1, figsize=(6, 4)) + ax.imshow(img) + + img_w, img_h = img.size + + if text_prompt: + ax.text( + 0.02, + 
0.98, + f'Text: "{text_prompt}"', + transform=ax.transAxes, + fontsize=12, + color="white", + weight="bold", + bbox=dict(boxstyle="round,pad=0.3", facecolor="red", alpha=0.7), + verticalalignment="top", + ) + + if point_prompts: + for i, point in enumerate(point_prompts): + x, y = point + # Convert relative to absolute coordinates + x_img, y_img = x * img_w, y * img_h + + # Use different colors for positive/negative points + if point_labels and len(point_labels) > i: + color = "green" if point_labels[i] == 1 else "red" + marker = "o" if point_labels[i] == 1 else "x" + else: + color = "green" + marker = "o" + + ax.plot( + x_img, + y_img, + marker=marker, + color=color, + markersize=10, + markeredgewidth=2, + markeredgecolor="white", + ) + ax.text( + x_img + 5, + y_img - 5, + f"P{i+1}", + color=color, + fontsize=10, + weight="bold", + bbox=dict(boxstyle="round,pad=0.2", facecolor="white", alpha=0.8), + ) + + if bounding_boxes: + for i, box in enumerate(bounding_boxes): + x, y, w, h = box + # Convert relative to absolute coordinates + x_img, y_img = x * img_w, y * img_h + w_img, h_img = w * img_w, h * img_h + + # Use different colors for positive/negative boxes + if box_labels and len(box_labels) > i: + color = "green" if box_labels[i] == 1 else "red" + else: + color = "green" + + rect = patches.Rectangle( + (x_img, y_img), + w_img, + h_img, + linewidth=2, + edgecolor=color, + facecolor="none", + ) + ax.add_patch(rect) + ax.text( + x_img, + y_img - 5, + f"B{i+1}", + color=color, + fontsize=10, + weight="bold", + bbox=dict(boxstyle="round,pad=0.2", facecolor="white", alpha=0.8), + ) + + # Add object ID info if provided + if obj_id is not None: + ax.text( + 0.02, + 0.02, + f"Object ID: {obj_id}", + transform=ax.transAxes, + fontsize=10, + color="white", + weight="bold", + bbox=dict(boxstyle="round,pad=0.3", facecolor="blue", alpha=0.7), + verticalalignment="bottom", + ) + + ax.set_title(title) + ax.axis("off") + plt.tight_layout() + plt.show() + + +def plot_results(img, results): + plt.figure(figsize=(12, 8)) + plt.imshow(img) + nb_objects = len(results["scores"]) + print(f"found {nb_objects} object(s)") + for i in range(nb_objects): + color = COLORS[i % len(COLORS)] + plot_mask(results["masks"][i].squeeze(0).cpu(), color=color) + w, h = img.size + prob = results["scores"][i].item() + plot_bbox( + h, + w, + results["boxes"][i].cpu(), + text=f"(id={i}, {prob=:.2f})", + box_format="XYXY", + color=color, + relative_coords=False, + ) + + +def single_visualization(img, anns, title): + """ + Create a single image visualization with overlays. 
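For reference, plot_results (defined above) expects a PIL image plus a dict of torch tensors with absolute XYXY boxes and masks shaped (N, 1, H, W). A toy call with dummy tensors, purely to illustrate the expected types and shapes:

```python
# Toy call to plot_results with dummy data (shapes/types only, not real model output).
import torch
from PIL import Image

img = Image.new("RGB", (320, 240), color=(127, 127, 127))
results = {
    "scores": torch.tensor([0.9]),                         # one detection
    "masks": torch.zeros(1, 1, 240, 320),                  # (N, 1, H, W) binary masks
    "boxes": torch.tensor([[80.0, 60.0, 160.0, 120.0]]),   # absolute XYXY
}
plot_results(img, results)
```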
+ """ + fig, ax = plt.subplots(figsize=(7, 7)) + fig.suptitle(title, fontsize=16, fontweight="bold") + overlay = render_masklet_frame(img, anns, alpha=0.5) + ax.imshow(overlay) + ax.axis("off") + plt.tight_layout() + + +def show_mask(mask, ax, obj_id=None, random_color=False): + if random_color: + color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0) + else: + cmap = plt.get_cmap("tab10") + cmap_idx = 0 if obj_id is None else obj_id + color = np.array([*cmap(cmap_idx)[:3], 0.6]) + h, w = mask.shape[-2:] + mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1) + ax.imshow(mask_image) + + +def show_box(box, ax): + x0, y0 = box[0], box[1] + w, h = box[2] - box[0], box[3] - box[1] + ax.add_patch( + plt.Rectangle((x0, y0), w, h, edgecolor="green", facecolor=(0, 0, 0, 0), lw=2) + ) + + +def show_points(coords, labels, ax, marker_size=375): + pos_points = coords[labels == 1] + neg_points = coords[labels == 0] + ax.scatter( + pos_points[:, 0], + pos_points[:, 1], + color="green", + marker="*", + s=marker_size, + edgecolor="white", + linewidth=1.25, + ) + ax.scatter( + neg_points[:, 0], + neg_points[:, 1], + color="red", + marker="*", + s=marker_size, + edgecolor="white", + linewidth=1.25, + ) + + +def load_frame(frame): + if isinstance(frame, np.ndarray): + img = frame + elif isinstance(frame, Image.Image): + img = np.array(frame) + elif isinstance(frame, str) and os.path.isfile(frame): + img = plt.imread(frame) + else: + raise ValueError(f"Invalid video frame type: {type(frame)=}") + return img diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..9a11be035220d3a2fae52efcf5717e02489bbd8e --- /dev/null +++ b/requirements.txt @@ -0,0 +1,14 @@ +torch==2.6.0 +torchvision==0.21.0 +transformers==4.50.1 +-e "detect_tools/sam3/.[train,dev]" +timm==1.0.9 +accelerate==1.4.0 +gradio +einops +ninja +scikit-image +decord +scikit-learn +scikit-image +matplotlib \ No newline at end of file diff --git a/vlm_fo1/__init__.py b/vlm_fo1/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/vlm_fo1/__init__.py @@ -0,0 +1 @@ + diff --git a/vlm_fo1/constants.py b/vlm_fo1/constants.py new file mode 100755 index 0000000000000000000000000000000000000000..ae6734f1b925109d6c03e69c322849c6aeb34702 --- /dev/null +++ b/vlm_fo1/constants.py @@ -0,0 +1,29 @@ +LOGDIR = "." 
+ +global DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN +# Model Constants +IGNORE_INDEX = -100 +IMAGE_TOKEN_INDEX = -200 #151656 #151655 #-200 +DEFAULT_IMAGE_TOKEN = "" +DEFAULT_IMAGE_PATCH_TOKEN = "" +DEFAULT_IM_START_TOKEN = "" +DEFAULT_IM_END_TOKEN = "" + +# For Qwen2_5_VL +QWEN2_5_VL_IMAGE_TOKEN = "<|image_pad|>" +QWEN2_5_VL_IMAGE_TOKEN_INDEX = 151655 + +# For regions +DEFAULT_REGION_TOKEN = ">" +DEFAULT_REGION_FEATURE_TOKEN = "" +DEFAULT_REGION_INDEX = -300 #151654 #151654 #-300 + +# For Grounding +DEFAULT_GROUNDING_START = "" +DEFAULT_GROUNDING_END = "" +DEFAULT_GROUNDING_OBJECTS_START = "" +DEFAULT_GROUNDING_OBJECTS_END = "" + +# For Think +DEFAULT_THINK_START = "" +DEFAULT_THINK_END = "" diff --git a/vlm_fo1/mm_utils.py b/vlm_fo1/mm_utils.py new file mode 100755 index 0000000000000000000000000000000000000000..157e0dde9954814ff56884ca93f9c1049a2671ca --- /dev/null +++ b/vlm_fo1/mm_utils.py @@ -0,0 +1,660 @@ +from PIL import Image +from PIL import ImageDraw, ImageOps +from io import BytesIO +import base64 +import re +import torch +from transformers import StoppingCriteria +from vlm_fo1.constants import IMAGE_TOKEN_INDEX, DEFAULT_REGION_INDEX +import requests +from vlm_fo1.constants import ( + IMAGE_TOKEN_INDEX, + DEFAULT_IMAGE_TOKEN, + DEFAULT_IM_START_TOKEN, + DEFAULT_IM_END_TOKEN, + IGNORE_INDEX, + DEFAULT_REGION_TOKEN, + DEFAULT_REGION_FEATURE_TOKEN +) +import torch +from transformers import TextStreamer +import random +import re +from typing import List, Tuple +import io +import base64 + + +def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None): + """ + Tokenizes prompts containing or ... special tokens. + + If the prompt uses , , ..., each is replaced with a placeholder index (-200). + If the prompt uses , it is replaced with image_token_index. + + Args: + prompt (str): The prompt potentially containing image tokens. + tokenizer: The tokenizer object. + image_token_index (int): Token id to use when encountering token. + return_tensors (Optional[str]): If 'pt', return a torch tensor. + + Returns: + List[int] or torch.Tensor: The tokenized input with image token indices inserted appropriately. + """ + if "" in prompt: + # Case: prompt contains indexed image tokens like , , etc. + image_token_pattern = re.compile(r"") + prompt_chunks = re.split(r'', prompt) + image_tags = image_token_pattern.findall(prompt) + + input_ids = [] + for i, chunk in enumerate(prompt_chunks): + input_ids.extend(tokenizer(chunk).input_ids) + if i < len(image_tags): + # Insert placeholder where token was. + input_ids.append(-200) + else: + # Case: prompt contains plain tokens. + prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('')] + + def insert_separator(X, sep): + # Helper function to insert a separator token between chunks. + return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1] + + input_ids = [] + offset = 0 + # If first chunk starts with token, make sure to keep it only once. + if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id: + offset = 1 + input_ids.append(prompt_chunks[0][0]) + + # Insert image_token_index between chunks. + for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)): + input_ids.extend(x[offset:]) + # Optionally convert output to PyTorch tensor. 
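The image-placeholder strings themselves are stripped in this diff (the angle-bracket tokens were lost), so the sketch below uses a hypothetical `<image>` stand-in only to illustrate the split-and-interleave behaviour that tokenizer_image_token implements: text chunks are tokenized normally, and a sentinel index (mirroring IMAGE_TOKEN_INDEX, -200) is inserted wherever a placeholder occurred.

```python
# Stand-alone sketch of the interleaving performed by tokenizer_image_token.
# "<image>" is a hypothetical stand-in for the placeholder string (elided in this
# diff); -200 mirrors IMAGE_TOKEN_INDEX from vlm_fo1.constants.
def sketch_tokenize(prompt, tokenizer, placeholder="<image>", image_token_index=-200):
    chunks = prompt.split(placeholder)
    input_ids = []
    for i, chunk in enumerate(chunks):
        input_ids.extend(tokenizer(chunk).input_ids)   # ordinary text tokens
        if i < len(chunks) - 1:
            input_ids.append(image_token_index)        # later swapped for vision features
    return input_ids
```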
+ if return_tensors is not None: + if return_tensors == 'pt': + return torch.tensor(input_ids, dtype=torch.long) + else: + raise ValueError(f'Unsupported tensor type: {return_tensors}') + + return input_ids + +def tokenizer_image_region_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, region_token_index=DEFAULT_REGION_INDEX, return_tensors=None): + """ + Tokenizes prompts containing both and delimiters, inserting specified token indices. + + Each chunk is split, and within that chunk, locations receive region_token_index. + + Args: + prompt (str): The prompt with and delimiters. + tokenizer: The tokenizer object. + image_token_index (int): Insert this at splits. + region_token_index (int): Insert this at splits. + return_tensors (Optional[str]): If 'pt', return torch tensor. + + Returns: + List[int] or torch.Tensor: The tokenized input with region/image tokens placed. + """ + # Split by tags first. + image_chunks = prompt.split('') + + prompt_chunks = [] + for chunk in image_chunks: + # Split each image chunk by . + obj_chunks = chunk.split('') + # Tokenize each subchunk. + token_chunks = [tokenizer(c).input_ids for c in obj_chunks] + prompt_chunks.append(token_chunks) + + input_ids = [] + offset = 0 + + # If first chunk starts with token, include only once. + if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and len(prompt_chunks[0][0]) > 0 and prompt_chunks[0][0][0] == tokenizer.bos_token_id: + offset = 1 + input_ids.append(prompt_chunks[0][0][0]) + + # Stitch together all chunks with region/image tokens at appropriate locations. + for i, chunk_group in enumerate(prompt_chunks): + if len(chunk_group) > 0: + input_ids.extend(chunk_group[0][offset:]) + for chunk in chunk_group[1:]: + input_ids.append(region_token_index) + input_ids.extend(chunk) + # Insert token except after the last image chunk. + if i < len(prompt_chunks) - 1: + input_ids.append(image_token_index) + # Optionally convert to PyTorch tensor. + if return_tensors is not None: + if return_tensors == 'pt': + return torch.tensor(input_ids, dtype=torch.long) + else: + raise ValueError(f'Unsupported tensor type: {return_tensors}') + + return input_ids + +class KeywordsStoppingCriteria(StoppingCriteria): + """ + Implements custom stopping criteria for generation based on keywords: + If the generated output contains any of the keywords, generation stops. + """ + def __init__(self, keywords, tokenizer, input_ids): + self.keywords = keywords + self.keyword_ids = [] + self.max_keyword_len = 0 + for keyword in keywords: + cur_keyword_ids = tokenizer(keyword).input_ids + # Remove BOS if present except for single token + if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id: + cur_keyword_ids = cur_keyword_ids[1:] + if len(cur_keyword_ids) > self.max_keyword_len: + self.max_keyword_len = len(cur_keyword_ids) + self.keyword_ids.append(torch.tensor(cur_keyword_ids)) + self.tokenizer = tokenizer + # Track the generation start length + self.start_len = input_ids.shape[1] + + def call_for_batch(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: + """ + Checks if a keyword exists in the latest generated output ids for a single batch element. 
+ """ + offset = min(output_ids.shape[1] - self.start_len, self.max_keyword_len) + self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids] + for keyword_id in self.keyword_ids: + truncated_output_ids = output_ids[0, -keyword_id.shape[0]:] + if torch.equal(truncated_output_ids, keyword_id): + return True + outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0] + for keyword in self.keywords: + if keyword in outputs: + return True + return False + + def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: + """ + Checks for keywords in each batch item; stops when all have satisfied the keyword condition. + """ + outputs = [] + for i in range(output_ids.shape[0]): + outputs.append(self.call_for_batch(output_ids[i].unsqueeze(0), scores)) + return all(outputs) + +def load_image(image_file): + """ + Loads an image from a local path, base64 string, URL, or PIL.Image. + + If the input image is smaller than 28x28, it will be resized to at least that size. + + Args: + image_file (str or PIL.Image.Image): Image source. + + Returns: + PIL.Image.Image: Loaded image in RGB mode, at least 28x28 in size. + """ + if isinstance(image_file, Image.Image): + image = image_file.convert("RGB") + # Case: load from URL + elif image_file.startswith("http") or image_file.startswith("https"): + response = requests.get(image_file) + image = Image.open(BytesIO(response.content)).convert("RGB") + # Case: load from base64-encoded string + elif image_file.startswith("data:image/"): + image = image_file.replace("data:image/jpeg;base64,", "") + image_data = base64.b64decode(image) + image = Image.open(BytesIO(image_data)).convert("RGB") + elif isinstance(image_file, str): + # Case: load from local file path + image = Image.open(image_file).convert("RGB") + else: + raise ValueError(f"Unsupported image type: {type(image_file)}") + + # Ensure minimum size 28x28 + if image.width < 28 or image.height < 28: + image = image.resize((max(28, image.width), max(28, image.height))) + return image + +def image_to_base64(img_pil): + """ + Encodes a PIL Image as JPEG in base64 format. + + Args: + img_pil (PIL.Image.Image): Source image. + + Returns: + str: base64-encoded JPEG image string. + """ + with io.BytesIO() as buffer: + img_pil.save(buffer, format="JPEG") + base64_image = base64.b64encode(buffer.getvalue()).decode('utf-8') + return base64_image + +def draw_bboxes_and_save( + image: Image.Image, + fo1_bboxes: dict = {}, + detection_bboxes: List[Tuple[int, int, int, int]] = [], + output_path: str = 'output.jpg', + color: str = 'red', + total_color: str = 'green', + width: int = 2 +) -> None: + """ + Draws bounding boxes (both ground-truth/proposed and detection) on a PIL image and saves result. + + Args: + image (PIL.Image.Image): Input PIL image object. + fo1_bboxes (dict): Label -> List[bbox] mapping for annotation bboxes. + detection_bboxes (List[Tuple]): List of detection bounding boxes; each bbox is (x_min, y_min, x_max, y_max). + output_path (str): Path to save the output image. + color (str): Color for fo1_bboxes. + total_color (str): Color for detection_bboxes. + width (int): Rectangle outline width. 
+ + Returns: + None + """ + draw = ImageDraw.Draw(image) + + # Draw detection boxes with `total_color` + for bbox in detection_bboxes: + if len(bbox) != 4: + print(f"Warning: skip the invalid bbox {bbox}") + continue + shape = [(bbox[0], bbox[1]), (bbox[2], bbox[3])] + draw.rectangle(shape, outline=total_color, width=width) + + # Draw annotated bboxes with labels and `color` + for bbox_label, bbox_list in fo1_bboxes.items(): + for bbox in bbox_list: + if len(bbox) != 4: + print(f"Warning: skip the invalid bbox {bbox}") + continue + shape = [(bbox[0], bbox[1]), (bbox[2], bbox[3])] + draw.rectangle(shape, outline=color, width=width) + draw.text((bbox[0], bbox[1]), bbox_label, fill=color) + + # Save output image (catching common IO exceptions). + try: + image.save(output_path) + print(f"The image has been successfully saved to: {output_path}") + except IOError as e: + print(f"Error: failed to save the image to {output_path}. Reason: {e}") + +def adjust_bbox(bbox_list, original_h, original_w, resize_h, resize_w): + """ + Adjusts bounding boxes from original image size to resized image size, compensating for scaling. + + Args: + bbox_list (List[List[float]]): List of original boxes [x1, y1, x2, y2]. + original_h (int): Original image height. + original_w (int): Original image width. + resize_h (int): Resized image height. + resize_w (int): Resized image width. + + Returns: + List[List[float]]: Bounding boxes transformed to resized image coordinates. + """ + output_list = [] + def adjust_bbox_range(bbox, width, height): + # Ensure all coordinates are within the original image border. + x1, y1, x2, y2 = bbox + x1 = max(0, min(width, x1)) + y1 = max(0, min(height, y1)) + x2 = max(0, min(width, x2)) + y2 = max(0, min(height, y2)) + return [x1, y1, x2, y2] + + for bbox in bbox_list: + bbox = adjust_bbox_range(bbox, original_w, original_h) + bbox[0] = bbox[0] * resize_w / original_w + bbox[1] = bbox[1] * resize_h / original_h + bbox[2] = bbox[2] * resize_w / original_w + bbox[3] = bbox[3] * resize_h / original_h + output_list.append(bbox) + return output_list + +def extract_predictions_to_bboxes(prediction: str, bbox_list): + """ + Parse prediction string in the expected format and map each ground label + to its corresponding bounding boxes using bbox_list. + + Args: + prediction (str): Model output string with ...... markup. + bbox_list (List[List[float]]): Full list of predicted or reference bounding boxes. + + Returns: + dict: label -> list of bboxes + """ + label_to_indexes = {} + label_to_bboxes = {} + + match_pattern = r"(.*?)<\/ground>(.*?)<\/objects>" + matches = re.findall(match_pattern, prediction) + + for label_text, indexes in matches: + label_text = label_text.strip() + indexes_tags = re.findall(r"", indexes) + region_indexes = set([int(index.split("")[0]) for index in indexes_tags]) + if label_text not in label_to_indexes: + label_to_indexes[label_text] = region_indexes + else: + label_to_indexes[label_text] = label_to_indexes[label_text] | region_indexes + + for label, indexes in label_to_indexes.items(): + label_to_bboxes[label] = [bbox_list[index] for index in indexes] + + return label_to_bboxes + +def extract_predictions_to_indexes(prediction: str): + """ + Parse prediction string, returning label -> set-of-indexes mapping. + + Args: + prediction (str): Model prediction output. 
+ + Returns: + dict: label -> set(int) + """ + label_to_indexes = {} + match_pattern = r"(.*?)<\/ground>(.*?)<\/objects>" + matches = re.findall(match_pattern, prediction) + + for label_text, indexes in matches: + label_text = label_text.strip() + indexes_tags = re.findall(r"", indexes) + region_indexes = set([int(index.split("")[0]) for index in indexes_tags]) + if label_text not in label_to_indexes: + label_to_indexes[label_text] = region_indexes + else: + label_to_indexes[label_text] = label_to_indexes[label_text] | region_indexes + + return label_to_indexes + +def resize_shortest_edge_images_and_bboxes( + image_list: List[Image.Image], + bbox_lists: List, + candidate_sizes: List[int] = [], + max_size: int = 2048 + ): + """ + Randomly selects a size for the shortest edge, and proportionally resizes both images and bounding boxes. + + The function maintains the image aspect ratio and ensures that the resized dimensions do not exceed the specified max_size. + Bounding boxes are transformed accordingly. + + Args: + image_list (List[Image.Image]): A list of PIL Image objects. + bbox_lists (List[List[List[float]]]): A list of lists of bounding boxes per image. + candidate_sizes (List[int]): Optional list of sizes to choose the target short edge from. + max_size (int): Maximum allowed long edge after resizing. + + Returns: + Tuple[List[Image.Image], List[List[List[float]]]]: + ([resized_image1, ...], [bbox_list1, ...]) - Possibly shape will match original (see below) + + Raises: + ValueError: on input list length mismatch or emptiness. + """ + bbox_tensor = torch.tensor(bbox_lists) + # Normalize input: wrap bbox_lists into list-of-list, if needed. + if len(bbox_tensor.shape) == 2 and bbox_tensor.shape[1] == 4: + bbox_lists = [bbox_lists] + + if not image_list or not bbox_lists: + raise ValueError("Input lists cannot be empty.") + if len(image_list) != len(bbox_lists): + raise ValueError("The lengths of the image list and the bounding box list must be the same.") + + # Randomly select short edge size (if given candidate sizes) + if len(candidate_sizes) > 0: + target_size = random.choice(candidate_sizes) + else: + target_size = None + + resized_images = [] + transformed_bbox_lists = [] + + # Process each image and its corresponding bbox list + for img, bboxes in zip(image_list, bbox_lists): + original_width, original_height = img.size + + # Determine scaling factor to bring short edge to target_size + shortest_side = min(original_width, original_height) + if target_size: + scale = target_size / shortest_side + else: + scale = 1.0 + + # Propose new height and width with this scale + new_height, new_width = int(original_height * scale), int(original_width * scale) + + # If resulting long edge exceeds max_size, rescale down so that it fits. 
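As a worked example of the scaling logic being built up here (numbers are illustrative): a 1920x1080 image with a target short edge of 768 gets scale = 768/1080 ≈ 0.711, i.e. roughly 1365x768; the long edge stays under max_size=2048, so no second rescale is applied, and every box coordinate is multiplied by the same per-axis ratios.

```python
# Worked example of the shortest-edge scaling used above (illustrative numbers).
original_w, original_h = 1920, 1080
target_size, max_size = 768, 2048

scale = target_size / min(original_w, original_h)                 # 768 / 1080
new_w, new_h = int(original_w * scale), int(original_h * scale)   # 1365 x 768
assert max(new_w, new_h) <= max_size                              # no second rescale

x1, y1, x2, y2 = 100.0, 200.0, 400.0, 600.0                       # box in original pixels
scaled_box = [x1 * new_w / original_w, y1 * new_h / original_h,
              x2 * new_w / original_w, y2 * new_h / original_h]
```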
+ longest_side = max(new_height, new_width) + if longest_side > max_size: + scale = max_size / longest_side + new_height, new_width = int(new_height * scale), int(new_width * scale) + # Ensure images are at least 28x28 (model may expect it) + new_width = max(28, new_width) + new_height = max(28, new_height) + + # Resize image, using BICUBIC for quality if shape changes + if new_width == original_width and new_height == original_height: + resized_img = img + else: + resized_img = img.resize((new_width, new_height), Image.Resampling.BICUBIC) + resized_images.append(resized_img) + + # Transform bounding boxes + current_transformed_bboxes = [] + scale_ratio_x = new_width / original_width + scale_ratio_y = new_height / original_height + for bbox in bboxes: + x1, y1, x2, y2 = bbox + new_x1 = x1 * scale_ratio_x + new_y1 = y1 * scale_ratio_y + new_x2 = x2 * scale_ratio_x + new_y2 = y2 * scale_ratio_y + current_transformed_bboxes.append([new_x1, new_y1, new_x2, new_y2]) + transformed_bbox_lists.append(current_transformed_bboxes) + + # If original input was a single image (not list), unpack. + if len(bbox_tensor.shape) == 2 and bbox_tensor.shape[1] == 4: + return resized_images, transformed_bbox_lists[0] + else: + return resized_images, transformed_bbox_lists + +def make_message_context(tokenizer, message, chat_format="chatml"): + """ + Given a message dict, construct the prompt, tokenized context tokens, image URLs, and bbox_list. + + Handles both standard string 'content' and multi-part (list) content, appropriately placing image/region tokens. + + Args: + tokenizer: tokenizer object + message (dict): Contains role, content, and optionally bbox_list. + chat_format (str): Optionally select chat format (default 'chatml'). + + Returns: + tuple: (inp, context_tokens, image_urls, bbox_list) + """ + image_urls = [] + if chat_format == "chatml": + im_start, im_end = "<|im_start|>", "<|im_end|>" + im_start_tokens = [151644] + im_end_tokens = [151645] + nl_tokens = tokenizer.encode("\n") + role = message["role"] + content = message["content"] + bbox_list = message.get("bbox_list", None) + + if role == "system": + inp = f"{im_start}{role}\n{content}{im_end}\n" + context_tokens = tokenizer.encode( + role, allowed_special=set()) + nl_tokens + tokenizer.encode(content, allowed_special=set()) + context_tokens = im_start_tokens + context_tokens + im_end_tokens + + if role == "user": + if isinstance(content, str): + # Plain string message + inp = f"{im_start}{role}\n{content}{im_end}\n" + context_tokens = tokenizer.encode( + role, allowed_special=set()) + nl_tokens + tokenizer.encode(content, + allowed_special=set()) + context_tokens = im_start_tokens + context_tokens + im_end_tokens + if isinstance(content, list): + # Multi-part message (text and image_url parts, maybe region tokens) + inp = f"{im_start}{role}\n" + image_count = 1 + for message_part in content: + if message_part["type"] == "text": + inp += f"{message_part['text']}" + + if message_part["type"] == "image_url": + # Insert special vision/image tokens, possibly region tokens + inp += DEFAULT_IM_START_TOKEN + '' + DEFAULT_IM_END_TOKEN + '\n' + # If regions exist, add per-region special token. 
+ if bbox_list and len(bbox_list) > 0: + for idx, bbox in enumerate(bbox_list): + inp += DEFAULT_REGION_TOKEN.replace('', str(idx)) + DEFAULT_REGION_FEATURE_TOKEN + inp += '\n' + + image_urls.append(message_part['image_url']['url']) + image_count += 1 + inp += f"{im_end}\n" + + # Choose tokenizer logic based on whether bbox (region) list exists + if bbox_list and len(bbox_list) > 0: + context_tokens = tokenizer_image_region_token(inp, tokenizer) + else: + context_tokens = tokenizer_image_token(inp, tokenizer, image_token_index=IMAGE_TOKEN_INDEX) + return inp, context_tokens, image_urls, bbox_list + +def prepare_inputs(model_name, model, image_processors, tokenizer, messages, device="cuda", max_tokens=512, top_p=1.0, temperature=0.0, do_sample=False, image_size=None): + """ + Fully prepares keyword arguments for model.generate (and compatible API) from messages and model specs. + + Handles prompt assembly, tokenization, image loading/preprocessing, region support, streaming, etc. + Supports specific tweak for Qwen2.5-VL style vision tokens. + + Args: + model_name (str): Model identifier string. + model: Model/config object. + image_processors (tuple): (primary, auxiliary) image processors. + tokenizer: Tokenizer object. + messages (list): Multi-message input list (chat history). + device (str): Target (usually 'cuda' or 'cpu'). + max_tokens, top_p, temperature, do_sample: Standard generation kwargs. + + Returns: + dict: ready-to-use argument dict for model.generate(). + """ + # For Qwen2.5-VL, patch vision special tokens globally. + if 'qwen2.5-vl' in model_name.lower() or 'qwen2_5_vl' in model_name.lower(): + global DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN + DEFAULT_IM_START_TOKEN = "<|vision_start|>" + DEFAULT_IM_END_TOKEN = "<|vision_end|>" + + primary_image_processor, auxiliary_image_processor = image_processors + + prompt = "" + input_tokens = [] + image_urls = [] + # Compose prompt and accumulate all components from provided messages + for message in messages: + inp, context_tokens, image_urls, bbox_list = make_message_context(tokenizer, message) + prompt += inp + input_tokens.extend(context_tokens) + + # Ensure a system prompt at start, if not already present. + if "system" not in prompt: + system_content = "system\nYou are a helpful assistant." + system_prompt = "<|im_start|>" + system_content + "<|im_end|>" + "\n" + prompt = system_prompt + prompt + system_tokens = [151644] + tokenizer(system_content).input_ids + [151645] + tokenizer("\n").input_ids + input_tokens = system_tokens + input_tokens + + # Ensure prompt ends with assistant's turn. 
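The string assembled by make_message_context / prepare_inputs follows the ChatML convention. For a text-only user turn (the image and region placeholder strings are elided in this diff and omitted here), the prompt handed to the tokenizer looks roughly like the sketch below; the user text is made up.

```python
# Rough shape of the ChatML prompt assembled above for a text-only user turn
# (image/region placeholders omitted because their literal strings are elided
# in this diff; the user text is made up).
prompt = (
    "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
    "<|im_start|>user\nDescribe the image.<|im_end|>\n"
    "<|im_start|>assistant\n"
)
```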
+ if not prompt.endswith("<|im_start|>assistant"): + last_assistant_prompt = "<|im_start|>" + "assistant" + "\n" + prompt += last_assistant_prompt + # last_assistant_tokens = [6] + self.tokenizer("assistant\n").input_ids + last_assistant_tokens = [151644] + tokenizer("assistant\n").input_ids + input_tokens.extend(last_assistant_tokens) + + primary_images_tensor = None + auxiliary_images_tensor = None + primary_image_grid_thws = None + if image_urls: + # Load images, resize them, and update bbox_list downstream + images = [load_image(i) for i in image_urls] + if image_size is not None: + images, bbox_list = resize_shortest_edge_images_and_bboxes(images, bbox_list, candidate_sizes=[image_size], max_size=2048) + else: + images, bbox_list = resize_shortest_edge_images_and_bboxes(images, bbox_list, max_size=2048) + + + # When region-indexed tokens are enabled + if getattr(model.config, 'mm_use_region_index_token', False): + origin_image_size = [image.size for image in images] + aux_images = images.copy() + auxiliary_images_tensor = [auxiliary_image_processor.preprocess(i, return_tensors='pt')['pixel_values'][0].to(device) for i in aux_images] + + if bbox_list and len(bbox_list) > 0: + # Limit number of bbox (for computational constraints, etc.) + bbox_list = bbox_list[:100] + resize_h, resize_w = auxiliary_images_tensor[0].shape[-2:] + original_w, original_h = origin_image_size[0] + # Adjust bbox to match resized images (post pre-processing) + bbox_list = adjust_bbox(bbox_list, original_h, original_w, resize_h, resize_w) + bbox_list = [torch.tensor(bbox_list)] + else: + bbox_list = None + else: + auxiliary_images_tensor = None + + # Preprocess primary images for main vision model branch + primary_images = [] + primary_image_grid_thws = [] + for im in images: + processed_data = primary_image_processor.preprocess(im, return_tensors="pt") + image_i = processed_data['pixel_values'] + image_grid_thw_i = processed_data['image_grid_thw'] + primary_images.append(image_i) + primary_image_grid_thws.append(image_grid_thw_i) + primary_images_tensor = [image_i.to(device) for image_i in primary_images] + + # For Qwen-style, force specific end-token as stopping criterion + if "qwen" in model_name.lower(): + input_ids = torch.tensor([input_tokens]).to(device) + keywords = ["<|im_end|>"] + + stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids) + streamer = TextStreamer( + tokenizer, skip_prompt=True, skip_special_tokens=True + ) + + # Default: greedy decoding if temperature=0. Else: enable sampling. 
+ if temperature == 0.0: + do_sample = False + else: + do_sample = True + + print("question:================\n", prompt, "\n=================") + # print("input ids:========", input_ids, "========") + generation_kwargs = dict( + inputs=input_ids, + images=primary_images_tensor, + images_aux=auxiliary_images_tensor, + image_grid_thws=primary_image_grid_thws, + bbox_list=bbox_list, + do_sample=do_sample, + temperature=temperature, + max_new_tokens=max_tokens, + streamer=streamer, + top_p=top_p, + use_cache=True, + stopping_criteria=[stopping_criteria], + pad_token_id=tokenizer.pad_token_id + ) + return generation_kwargs + diff --git a/vlm_fo1/model/__init__.py b/vlm_fo1/model/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..de7b6f435ea394dacc44a447abbfa01e2dcdfc09 --- /dev/null +++ b/vlm_fo1/model/__init__.py @@ -0,0 +1 @@ +from .language_model.omchat_qwen2_5_vl import OmChatQwen25VLForCausalLM, OmChatQwen25VLConfig \ No newline at end of file diff --git a/vlm_fo1/model/builder.py b/vlm_fo1/model/builder.py new file mode 100755 index 0000000000000000000000000000000000000000..a6d1dc2426f6a2e6234430ed7354388894516507 --- /dev/null +++ b/vlm_fo1/model/builder.py @@ -0,0 +1,89 @@ +from transformers import AutoTokenizer +import torch +from vlm_fo1.model import * +from safetensors.torch import load_file +import os + + +def load_pretrained_model(model_path, load_8bit=False, load_4bit=False, device="cuda"): + """ + Loads a pretrained model along with its vision towers (and associated image processors). + This function supports loading in 8bit/4bit precision and explicit device placement. + + Args: + model_path (str): Path to the pretrained model directory. + load_8bit (bool): Whether to load the model in 8bit mode. + load_4bit (bool): Whether to load the model in 4bit mode. + device (str): Device to load model onto, e.g., "cuda" or "cpu". + + Returns: + tuple: (tokenizer, model, image_processor) + """ + kwargs = {"device_map": device} + + # Set model loading parameters for quantization or floating point + if load_8bit: + kwargs['load_in_8bit'] = True + elif load_4bit: + kwargs['load_in_4bit'] = True + else: + kwargs['torch_dtype'] = torch.bfloat16 + + # print(model_path) + + # Only proceed for vlm-fo1 models + if 'vlm-fo1' in model_path.lower(): + # Load tokenizer (slow tokenizer enforced) + tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False) + # If this is the Qwen2.5-VL variant, load with additional kwargs + if 'qwen2.5-vl' in model_path.lower() or 'qwen2_5_vl' in model_path.lower(): + model, loading_info = OmChatQwen25VLForCausalLM.from_pretrained( + model_path, + low_cpu_mem_usage=True, + output_loading_info=True, + attn_implementation="flash_attention_2", + **kwargs + ) + # print(f'OmChatQwen25VLForCausalLM loading_info: {loading_info}') + # (For other variants of vlm-fo1, model loading detail may need additional condition.) 
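A hypothetical call pattern for load_pretrained_model (defined here): the checkpoint path is a placeholder for a local VLM-FO1 Qwen2.5-VL checkpoint directory, and the returned image_processor is the (primary, auxiliary) tuple that prepare_inputs expects.

```python
# Hypothetical usage of load_pretrained_model; the checkpoint path is a placeholder.
model_path = "checkpoints/VLM-FO1-Qwen2.5-VL"   # placeholder; must contain "vlm-fo1"
tokenizer, model, image_processors = load_pretrained_model(model_path, device="cuda")
primary_image_processor, auxiliary_image_processor = image_processors
```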
+
+    if 'vlm-fo1' in model_path.lower():
+        # --- Vision Tower Loading ---
+        # Load the main vision tower weights from model_path if it is not yet loaded
+        primary_image_processor = None
+        aux_image_processor = None
+        primary_vision_tower = model.get_vision_tower()
+        if primary_vision_tower and not primary_vision_tower.is_loaded:
+            primary_vision_tower.load_model(model_path=model_path, is_train=False)
+            primary_vision_tower.to(device=device, dtype=torch.bfloat16)  # Move to correct device/dtype
+
+        # Grab primary image processor from vision tower, if present
+        if primary_vision_tower:
+            primary_image_processor = primary_vision_tower.image_processor
+
+        # --- Auxiliary Vision Tower Handling (Qwen2.5-VL case only) ---
+        if 'qwen2.5-vl' in model_path.lower() or 'qwen2_5_vl' in model_path.lower():
+            try:
+                aux_image_size = model.config.aux_image_size
+            except Exception:
+                # If aux_image_size is missing from the config, fall back to 768
+                aux_image_size = 768
+
+            aux_image_aspect_ratio = model.config.aux_image_aspect_ratio
+            aux_vision_tower = model.get_vision_tower_aux()
+            # Only load if not already loaded
+            if aux_vision_tower and not aux_vision_tower.is_loaded:
+                aux_vision_tower.load_model(image_size=aux_image_size, is_train=False, aspect_ratio=aux_image_aspect_ratio)
+                aux_vision_tower.to(device=device, dtype=torch.bfloat16)
+
+            # Grab the auxiliary image processor if there is an aux vision tower;
+            # otherwise aux_image_processor simply stays None
+            if aux_vision_tower:
+                aux_image_processor = aux_vision_tower.image_processor
+
+        # image_processor is returned as a tuple of (primary, aux)
+        image_processor = (primary_image_processor, aux_image_processor)
+
+    # Set model to eval mode and move to correct device before returning
+    model.eval()
+    model.to(device=device, dtype=torch.bfloat16)
+    return tokenizer, model, image_processor
diff --git a/vlm_fo1/model/language_model/omchat_qwen2_5_vl.py b/vlm_fo1/model/language_model/omchat_qwen2_5_vl.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbd367afaa84600863376450efb570c3d942f36d
--- /dev/null
+++ b/vlm_fo1/model/language_model/omchat_qwen2_5_vl.py
@@ -0,0 +1,576 @@
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+
+from transformers import Qwen2_5_VLConfig, AutoConfig, AutoModelForCausalLM
+from vlm_fo1.model.multimodal_encoder.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2_5_VLModel, Qwen2_5_VLForConditionalGeneration, Qwen2_5_VLCausalLMOutputWithPast
+from vlm_fo1.model.multimodal_encoder.qwen2_5_vl_encoder import Qwen2_5_VlVisionTower
+from vlm_fo1.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, DEFAULT_REGION_INDEX, QWEN2_5_VL_IMAGE_TOKEN, QWEN2_5_VL_IMAGE_TOKEN_INDEX
+
+from ..omchat_arch import OmChatMetaModel, OmChatMetaForCausalLM
+
+# Custom config which extends Qwen2_5_VLConfig for the OmChat multimodal model
+class OmChatQwen25VLConfig(Qwen2_5_VLConfig):
+    model_type = "omchat_qwen2_5_vl"
+    rotary_type = "normal_rotary"
+    multi_scale_im = None
+    vision_tower_aux = None
+
+# Core model definition: inherits from the OmChat meta model and the Qwen multimodal base
+class OmChatQwen25VLModel(OmChatMetaModel, Qwen2_5_VLModel):
+    config_class = OmChatQwen25VLConfig
+
+    def __init__(self, config: Qwen2_5_VLConfig):
+        super(OmChatQwen25VLModel, self).__init__(config)
+
+# Main class for the multimodal CausalLM
+class OmChatQwen25VLForCausalLM(Qwen2_5_VLForConditionalGeneration, OmChatMetaForCausalLM):
+    config_class = OmChatQwen25VLConfig
+
+    def __init__(self, config,
delay_load=True): + # Ensure config has delay_load property + if not hasattr(config, 'delay_load'): + config.delay_load = delay_load + super(Qwen2_5_VLForConditionalGeneration, self).__init__(config) + self.model = OmChatQwen25VLModel(config) + self.vocab_size = config.vocab_size + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + self.rope_deltas = None # cache rope_deltas here + + self.post_init() + + # Encode input images into feature representations + def encode_images(self, images, images_grid_thw=None): + # If vision_tower is Qwen2.5-specific, use its custom forward signature + if isinstance(self.get_model().get_vision_tower(), Qwen2_5_VlVisionTower): + image_features = self.get_model().get_vision_tower()(images, images_grid_thw) + image_features, image_grid_thws, multi_level_features = image_features + # If multiple images, handle concatenation + if type(image_features) is list: + # List has items of shape (1, seq_len, dim) + token_length_list = [i.shape[1] for i in image_features] + image_features = torch.cat(image_features, dim=1) # Concatenate to (1, total_seq_len, dim) + else: + image_features = self.get_model().get_vision_tower()(images) + image_grid_thws = None + multi_level_features = None + + image_features = self.get_model().mm_projector(image_features) + + # Split concatenated image features back by original lengths (for multi-image case) + if isinstance(self.get_model().get_vision_tower(), Qwen2_5_VlVisionTower): + start = 0 + new_image_features = [] + # Split according to token_length_list + for length in token_length_list: + end = start + length + new_image_features.append(image_features[:, start:end, :].squeeze(0)) + start = end + image_features = new_image_features + + return image_features, image_grid_thws, multi_level_features + + # Encode region regions (bounding boxes) into features, optionally using auxiliary vision tower + def encode_regions(self, images, bbox_list, vt_multi_level_features=None, vt_images_size=None): + aux_image_features_list = self.get_model().get_vision_tower_aux()(images) + region_features = [] + if getattr(self.config, "mm_use_vision_tower_region_feature", False): + image_features_list = vt_multi_level_features + for batch_idx, (image_features, aux_image_features) in enumerate(zip(image_features_list, aux_image_features_list)): + + if getattr(self.config, "mm_use_simpleFPN_for_vt", False): + multilevel_visual_feats = image_features[-1] + else: + multilevel_visual_feats = image_features + multilevel_aux_visual_feats = aux_image_features["image_features"] + boxes = bbox_list[batch_idx] + + # If no boxes provided, use dummy box (covers tiny region) + if boxes is None or len(boxes) == 0: + boxes = torch.tensor([[0, 10, 0, 10]], device=multilevel_aux_visual_feats[0].device, dtype=torch.float32) + + boxes = boxes.to(torch.float32).to(multilevel_aux_visual_feats[0].device) + current_image_height, current_image_width = images[batch_idx].shape[-2:] + original_height, original_width = vt_images_size[batch_idx] + # Scale bounding boxes from original image size to processed size + scale_height = original_height / current_image_height + scale_width = original_width / current_image_width + vt_boxes = boxes * torch.tensor([scale_width, scale_height, scale_width, scale_height], device=boxes.device) + + extracted_region_feat = self.get_model().object_vp_extractor( + aux_multi_level_features=multilevel_aux_visual_feats, + vt_multi_level_features=multilevel_visual_feats, + aux_boxes=[boxes], + vt_boxes=[vt_boxes] + 
).squeeze(0).to(multilevel_aux_visual_feats[0].dtype) + region_feat = self.get_model().mm_projector_aux(extracted_region_feat) # [num_bbox, 2048] + region_features.append(region_feat) + else: + # Extract region features only from auxiliary vision tower + for batch_idx, image_features in enumerate(aux_image_features_list): + multilevel_visual_feats = image_features["image_features"] + last_feat = image_features["last_feat"] + boxes = bbox_list[batch_idx] + + if boxes is None or len(boxes) == 0: + boxes = torch.tensor([[0, 10, 0, 10]], device=multilevel_visual_feats[0].device, dtype=torch.float32) + + multi_level_aux_features = multilevel_visual_feats + boxes = boxes.to(torch.float32).to(multi_level_aux_features[0].device) + extracted_region_feat = self.get_model().object_vp_extractor( + multi_level_aux_features, + [boxes], + ).squeeze(0).to(multi_level_aux_features[0].dtype) + region_feat = self.get_model().mm_projector_aux(extracted_region_feat) # [num_bbox, 2880] + region_features.append(region_feat) + + return region_features + + def get_model(self): + # Getter for model. Used to access backbone/model internals. + return self.model + + # Convert sequence of input_ids/labels/images/boxes to multimodal embedding and associated masks/ids for transformer input. + def prepare_inputs_labels_for_qwen2_5_vl_multimodal( + self, input_ids, position_ids, attention_mask, past_key_values, labels, images, images_aux=None, bbox_list=None, image_grid_thws=None + ): + # ========================== Above this line, input parsing and batching ============================= + vision_tower = self.get_vision_tower() + video_tower = self.get_video_tower() + vision_tower_aux = self.get_vision_tower_aux() + # Fast-path for non-multimodal case or first step in generation (i.e. 
only one token in input) + if (vision_tower is None and video_tower is None) or images is None or input_ids.shape[1] == 1: + if past_key_values is not None and (vision_tower is not None or video_tower is not None) and images is not None and input_ids.shape[1] == 1: + + target_shape = past_key_values[-1][-1].shape[-2] + 1 + attention_mask = torch.cat((attention_mask, torch.ones( + (attention_mask.shape[0], target_shape - attention_mask.shape[1]), + dtype=attention_mask.dtype, + device=attention_mask.device + )), dim=1) + + position_ids=None + cache_position = torch.tensor([target_shape - 1],device=attention_mask.device) + return input_ids, position_ids, attention_mask, past_key_values, None, labels, None, cache_position + + # Indices for images (3D or 2D tensors) and videos (4D tensors) + image_idx = [idx for idx, img in enumerate(images) if img.ndim == 3 or img.ndim == 2] + is_all_image = len(image_idx) == len(images) + video_idx = [idx for idx, vid in enumerate(images) if vid.ndim == 4] + + # Stack image and video tensors accordingly for mini-batch processing + if isinstance(vision_tower, Qwen2_5_VlVisionTower): + images_minibatch = [images[idx] for idx in image_idx] if len(image_idx) > 0 else [] # list of [c,h,w], can have variable shapes + else: + images_minibatch = torch.stack([images[idx] for idx in image_idx]) if len(image_idx) > 0 else [] # tensor [mini_b, c, h, w] + videos_minibatch = torch.stack([images[idx] for idx in video_idx]) if len(video_idx) > 0 else [] # tensor [mini_b, c, t, h, w] + + # Auxiliary batch for region encoding, if relevant + if vision_tower_aux is not None and images_aux is not None: + images_minibatch_aux = [images_aux[idx].unsqueeze(0) for idx in image_idx] if len(image_idx) > 0 else [] # list of [1, c, h, w] + + # tmp_image_features will be indexed to scatter extracted image/video features into original batch positions + tmp_image_features = [None] * (len(image_idx) + len(video_idx)) + if getattr(images_minibatch, 'ndim', 0) == 4 or (type(images_minibatch) is list and len(images_minibatch) > 0): # batch consists of images, [mini_b, c, h, w] + if vision_tower is not None: + image_features_minibatch, image_grid_thws_minibatch, vt_multi_level_features_minibatch = self.encode_images(images_minibatch, image_grid_thws) # [mini_b, l, c] + else: + image_features_minibatch = torch.randn(1).to(self.device) # dummy feature for video-only training under tuning + + # Map extracted image features back to their places in the original batch + for i, pos in enumerate(image_idx): + tmp_image_features[pos] = image_features_minibatch[i] + + # Handle auxiliary region features if enabled and boxes provided + if vision_tower_aux is not None and bbox_list is not None and len(bbox_list) > 0: + if isinstance(self.get_model().get_vision_tower(), Qwen2_5_VlVisionTower): + patch_size = self.get_model().get_vision_tower().config.patch_size + vt_images_size_minibatch = [im_grid_thw[0][-2:]*patch_size for im_grid_thw in image_grid_thws] + region_features = self.encode_regions(images_minibatch_aux, bbox_list, vt_multi_level_features_minibatch, vt_images_size_minibatch) # [mini_b, l, c] + else: + region_features = None + + # Same as above, but for video features if any + if getattr(videos_minibatch, 'ndim', 0) == 5: # batch consists of videos, [mini_b, c, t, h, w] + video_features_minibatch = self.encode_videos(videos_minibatch) # fake list [mini_b, t, l, c] + for i, pos in enumerate(video_idx): + tmp_image_features[pos] = video_features_minibatch[i] + + # Flatten image feature slot list to 
proper order for current batch + new_tmp = [] + for image in tmp_image_features: + # If multi-image per item, flatten out + if isinstance(image, list): + t = len(image) + for i in range(t): + new_tmp.append(image[i]) + else: + new_tmp.append(image) + image_features = new_tmp + + # =========================== Now, build multimodal input & target sequences ========================= + + if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): + raise NotImplementedError + + _labels = labels + _position_ids = position_ids + _attention_mask = attention_mask + + # Default construction of masks etc. + if attention_mask is None: + attention_mask = torch.ones_like(input_ids, dtype=torch.bool) + else: + attention_mask = attention_mask.bool() + if position_ids is None: + position_ids = torch.arange(0, input_ids.shape[1], dtype=torch.long, device=input_ids.device) + if labels is None: + labels = torch.full_like(input_ids, IGNORE_INDEX) + + # For each batch item, strip padded tokens based on attention_mask + input_ids = [cur_input_ids[cur_attention_mask] for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask)] + labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)] + + # If neither region auxiliary nor bboxes present: process classic image-text input + if vision_tower_aux is None and (bbox_list is None or all(x is None for x in bbox_list)): + new_input_embeds = [] + new_labels = [] + new_input_ids = [] + cur_image_idx = 0 + image_nums_in_batch = [] + + for batch_idx, cur_input_ids in enumerate(input_ids): + num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum() + image_nums_in_batch.append(num_images) + # If there are no image markers, just get text features + if num_images == 0: + cur_image_features = image_features[cur_image_idx] + cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids) + cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0]], dim=0) + new_input_embeds.append(cur_input_embeds) + new_labels.append(labels[batch_idx]) + new_input_ids.append(cur_input_ids) + cur_image_idx += 1 + continue + + # Split on image token indices: replace them with image features after conversion + image_token_indices = [-1] + torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist() + [cur_input_ids.shape[0]] + cur_input_ids_noim = [] + cur_labels = labels[batch_idx] + cur_labels_noim = [] + for i in range(len(image_token_indices) - 1): + cur_input_ids_noim.append(cur_input_ids[image_token_indices[i]+1:image_token_indices[i+1]]) + cur_labels_noim.append(cur_labels[image_token_indices[i]+1:image_token_indices[i+1]]) + split_sizes = [x.shape[0] for x in cur_labels_noim] + cur_input_embeds = self.get_model().embed_tokens(torch.cat(cur_input_ids_noim)) + cur_input_embeds_no_im = torch.split(cur_input_embeds, split_sizes, dim=0) + + cur_new_input_embeds = [] + cur_new_labels = [] + cur_new_input_ids = [] + for i in range(num_images + 1): + # Interleave text and image features + cur_new_input_embeds.append(cur_input_embeds_no_im[i]) + cur_new_labels.append(cur_labels_noim[i]) + cur_new_input_ids.append(cur_input_ids_noim[i]) + if i < num_images: + cur_image_features = image_features[cur_image_idx].to(self.device) + cur_image_idx += 1 + cur_new_input_embeds.append(cur_image_features) + cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=cur_labels.device, dtype=cur_labels.dtype)) + 
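+                    # Also record one placeholder image-token id per inserted feature so that
+                    # new_input_ids stays aligned with the spliced-in embeddings and
+                    # get_rope_index can later locate the image positions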
cur_new_input_ids.append(torch.full((cur_image_features.shape[0],), self.config.image_token_id, device=cur_labels.device, dtype=cur_labels.dtype)) + cur_new_input_embeds = torch.cat(cur_new_input_embeds) + cur_new_labels = torch.cat(cur_new_labels) + cur_new_input_ids = torch.cat(cur_new_input_ids) + + new_input_embeds.append(cur_new_input_embeds) + new_labels.append(cur_new_labels) + new_input_ids.append(cur_new_input_ids) + # If region markers or region features enabled in config + else: + new_input_embeds = [] + new_labels = [] + new_input_ids = [] + cur_image_idx = 0 + image_nums_in_batch = [] + + for batch_idx, cur_input_ids in enumerate(input_ids): + cur_region_idx = 0 + # Detect image and region special token counts + num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum() + num_regions = (cur_input_ids == DEFAULT_REGION_INDEX).sum() if DEFAULT_REGION_INDEX in cur_input_ids else 0 + image_nums_in_batch.append(num_images) + + # If no markers, just do text embedding for this item + if num_images == 0 and num_regions == 0: + cur_image_features = image_features[cur_image_idx] + cur_region_features = region_features[cur_region_idx] + cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids) + cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0], cur_region_features[0:0]], dim=0) + new_input_embeds.append(cur_input_embeds) + new_labels.append(labels[batch_idx]) + new_input_ids.append(cur_input_ids) + cur_image_idx += 1 + continue + + # Get all special marker indices (image/region) + image_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist() + region_indices = torch.where(cur_input_ids == DEFAULT_REGION_INDEX)[0].tolist() if num_regions > 0 else [] + all_special_indices = sorted([-1] + image_indices + region_indices + [cur_input_ids.shape[0]]) + + # Split out plain text chunks between special markers + cur_input_ids_segments = [] + cur_labels = labels[batch_idx] + cur_labels_segments = [] + + for i in range(len(all_special_indices) - 1): + cur_input_ids_segments.append(cur_input_ids[all_special_indices[i]+1:all_special_indices[i+1]]) + cur_labels_segments.append(cur_labels[all_special_indices[i]+1:all_special_indices[i+1]]) + + # Project text ids to word embeddings + split_sizes = [x.shape[0] for x in cur_labels_segments] + cur_input_embeds = self.get_model().embed_tokens(torch.cat(cur_input_ids_segments)) + if num_regions == 0 and vision_tower_aux is not None and region_features is not None: + cur_region_features = region_features[cur_region_idx] + temp_input_embeds = torch.cat([cur_input_embeds, cur_region_features[0:0]], dim=0) + cur_input_embeds = temp_input_embeds + + cur_input_embeds_segments = torch.split(cur_input_embeds, split_sizes, dim=0) + + # Reassemble text and image/region segments in order + cur_new_input_embeds = [] + cur_new_labels = [] + cur_new_input_ids = [] + + for i in range(len(all_special_indices) - 1): + # Insert current text segment + cur_new_input_embeds.append(cur_input_embeds_segments[i]) + cur_new_labels.append(cur_labels_segments[i]) + cur_new_input_ids.append(cur_input_ids_segments[i]) + # If next is image, insert feature representation + if all_special_indices[i+1] in image_indices: + cur_image_features = image_features[cur_image_idx].to(self.device) + cur_image_idx += 1 + cur_new_input_embeds.append(cur_image_features) + cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=cur_labels.device, dtype=cur_labels.dtype)) + 
cur_new_input_ids.append(torch.full((cur_image_features.shape[0],), self.config.image_token_id, device=cur_labels.device, dtype=cur_labels.dtype)) + + # If next is region token, insert extracted region features + elif all_special_indices[i+1] in region_indices: + cur_region_features = region_features[batch_idx][cur_region_idx].to(self.device).unsqueeze(0) + cur_region_idx += 1 + cur_new_input_embeds.append(cur_region_features) + + cur_new_labels.append(torch.full((cur_region_features.shape[0],), IGNORE_INDEX, device=cur_labels.device, dtype=cur_labels.dtype)) + cur_new_input_ids.append(torch.full((cur_region_features.shape[0],), DEFAULT_REGION_INDEX, device=cur_labels.device, dtype=cur_labels.dtype)) + # Combine for this batch item + cur_new_input_embeds = torch.cat(cur_new_input_embeds) + cur_new_labels = torch.cat(cur_new_labels) + cur_new_input_ids = torch.cat(cur_new_input_ids) + new_input_embeds.append(cur_new_input_embeds) + new_labels.append(cur_new_labels) + new_input_ids.append(cur_new_input_ids) + # Truncate sequences to maximum model length, if image+region tokens caused overflow + tokenizer_model_max_length = getattr(self.config, 'tokenizer_model_max_length', None) + if tokenizer_model_max_length is not None: + new_input_embeds = [x[:tokenizer_model_max_length] for x in new_input_embeds] + new_labels = [x[:tokenizer_model_max_length] for x in new_labels] + + # Pad sequences in the batch to same length; compute batch masks + max_len = max(x.shape[0] for x in new_input_embeds) + batch_size = len(new_input_embeds) + + new_input_embeds_padded = [] + new_labels_padded = torch.full((batch_size, max_len), IGNORE_INDEX, dtype=new_labels[0].dtype, device=new_labels[0].device) + new_input_ids_padded = torch.full((batch_size, max_len), self.config.bos_token_id, dtype=new_input_ids[0].dtype, device=new_input_ids[0].device) + attention_mask = torch.zeros((batch_size, max_len), dtype=attention_mask.dtype, device=attention_mask.device) + position_ids = torch.zeros((batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device) + + # Left or right padding as per config; fill padded tensors + for i, (cur_new_embed, cur_new_labels, cur_new_input_ids) in enumerate(zip(new_input_embeds, new_labels, new_input_ids)): + cur_len = cur_new_embed.shape[0] + if getattr(self.config, 'tokenizer_padding_side', 'right') == "left": + # Left pad: add zeros before text tokens/features + new_input_embeds_padded.append(torch.cat(( + torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device), + cur_new_embed + ), dim=0)) + if cur_len > 0: + new_labels_padded[i, -cur_len:] = cur_new_labels + attention_mask[i, -cur_len:] = True + position_ids[i, -cur_len:] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device) + else: + # Right pad: add zeros after text tokens/features + new_input_embeds_padded.append(torch.cat(( + cur_new_embed, + torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device) + ), dim=0)) + if cur_len > 0: + new_labels_padded[i, :cur_len] = cur_new_labels + new_input_ids_padded[i, :cur_len] = cur_new_input_ids + attention_mask[i, :cur_len] = True + position_ids[i, :cur_len] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device) + + new_input_embeds = torch.stack(new_input_embeds_padded, dim=0) + new_input_ids = new_input_ids_padded + + # Only set new_labels if original labels were not None + if _labels is None: + new_labels = None + 
else: + new_labels = new_labels_padded + + # Similarly handle provided attention_mask/position_ids overrides + if _attention_mask is None: + attention_mask = None + else: + attention_mask = attention_mask.to(dtype=_attention_mask.dtype) + + if _position_ids is None: + position_ids = None + + # For Qwen2.5 vision towers, use and concatenate image_grid_thws for positional computations + if isinstance(self.get_model().get_vision_tower(), Qwen2_5_VlVisionTower): + image_grid_thws = [] + cur_image_idx = 0 + for num_images in image_nums_in_batch: + if num_images == 0: + cur_image_idx += 1 + continue + image_grid_thws += image_grid_thws_minibatch[cur_image_idx:cur_image_idx+num_images] + cur_image_idx += num_images + + if len(image_grid_thws) > 0: + image_grid_thws = torch.cat(image_grid_thws, dim=0) + else: + image_grid_thws = None + + rope_index_kwargs = { + "input_ids": new_input_ids, + "image_grid_thw": image_grid_thws, + "video_grid_thw": None, + "attention_mask": attention_mask, + } + + # Compute new position_ids and rope_deltas for transformer (for rotary embeddings) + position_ids, rope_deltas = self.get_rope_index(**rope_index_kwargs) + cache_position = torch.arange(new_input_embeds.shape[1], device=new_input_embeds.device) + else: + rope_deltas = None + cache_position = None + # Final output is a tuple mimicking HuggingFace prepare_inputs_for_generation return + return None, position_ids, attention_mask, past_key_values, new_input_embeds, new_labels, rope_deltas, cache_position + + # Patch forward() of HF CausalLM to allow multimodal embedding with images/regions + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + pixel_values: Optional[torch.Tensor] = None, + pixel_values_videos: Optional[torch.FloatTensor] = None, + image_grid_thw: Optional[torch.LongTensor] = None, + video_grid_thw: Optional[torch.LongTensor] = None, + rope_deltas: Optional[torch.LongTensor] = None, + cache_position: Optional[torch.LongTensor] = None, + second_per_grid_ts: Optional[torch.Tensor] = None, + images: Optional[torch.FloatTensor] = None, + images_aux: Optional[torch.FloatTensor] = None, + bbox_list: Optional[torch.FloatTensor] = None, + image_grid_thws: Optional[torch.FloatTensor] = None, + ) -> Union[Tuple, Qwen2_5_VLCausalLMOutputWithPast]: + + if inputs_embeds is None: + ( + input_ids, + position_ids, + attention_mask, + past_key_values, + inputs_embeds, + labels, + rope_deltas, + cache_position + ) = self.prepare_inputs_labels_for_qwen2_5_vl_multimodal( + input_ids, + position_ids, + attention_mask, + past_key_values, + labels, + images, + images_aux, + bbox_list, + image_grid_thws + ) + + if rope_deltas is not None: + self.rope_deltas = rope_deltas + + # Call base CausalLM forward, with possibly replaced multimodal embeddings + out = super().forward( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + labels=labels, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + rope_deltas=rope_deltas, + 
cache_position=cache_position, + second_per_grid_ts=second_per_grid_ts, + return_dict=return_dict + ) + return out + + # Prepare model input dict for autoregressive generation (for use with generation methods like generate()) + def prepare_inputs_for_generation( + self, + input_ids, + past_key_values=None, + attention_mask=None, + inputs_embeds=None, + cache_position=None, + position_ids=None, + use_cache=True, + pixel_values=None, + pixel_values_videos=None, + image_grid_thw=None, + video_grid_thw=None, + second_per_grid_ts=None, + images: Optional[torch.FloatTensor] = None, + images_aux: Optional[torch.FloatTensor] = None, + bbox_list: Optional[torch.FloatTensor] = None, + image_grid_thws: Optional[torch.FloatTensor] = None, + **kwargs, + ): + # Wrap parent logic so extra multimodal kwargs are preserved + model_inputs = super().prepare_inputs_for_generation( + input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + cache_position=cache_position, + pixel_values=pixel_values, + pixel_values_videos=pixel_values_videos, + image_grid_thw=image_grid_thw, + video_grid_thw=video_grid_thw, + second_per_grid_ts=second_per_grid_ts, + images=images, + images_aux=images_aux, + bbox_list=bbox_list, + image_grid_thws=image_grid_thws, + ) + return model_inputs + +# Register our config and model with HuggingFace transformers registry +AutoConfig.register("omchat_qwen2_5_vl", OmChatQwen25VLConfig) +AutoModelForCausalLM.register(OmChatQwen25VLConfig, OmChatQwen25VLForCausalLM) diff --git a/vlm_fo1/model/multimodal_encoder/__init__.py b/vlm_fo1/model/multimodal_encoder/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vlm_fo1/model/multimodal_encoder/base_encoder.py b/vlm_fo1/model/multimodal_encoder/base_encoder.py new file mode 100755 index 0000000000000000000000000000000000000000..0918a4008736d44bedf1a1791f319ae7b34d6c69 --- /dev/null +++ b/vlm_fo1/model/multimodal_encoder/base_encoder.py @@ -0,0 +1,33 @@ +import torch +import torch.nn as nn + + +class AbsVisionTower(nn.Module): + @torch.no_grad() + def forward(self, images): + raise NotImplementedError + + @property + def dummy_feature(self): + raise NotImplementedError + + @property + def dtype(self): + raise NotImplementedError + + @property + def device(self): + raise NotImplementedError + + @property + def config(self): + raise NotImplementedError + + + @property + def hidden_size(self): + raise NotImplementedError + + @property + def num_patches(self): + raise NotImplementedError diff --git a/vlm_fo1/model/multimodal_encoder/builder.py b/vlm_fo1/model/multimodal_encoder/builder.py new file mode 100755 index 0000000000000000000000000000000000000000..4aa6c18bb2f363fdedab568e8b49ba789858a53f --- /dev/null +++ b/vlm_fo1/model/multimodal_encoder/builder.py @@ -0,0 +1,38 @@ +# Builders for different vision tower backbones (MM encoder visual modules) +from .qwen2_5_vl_encoder import Qwen2_5_VlVisionTower # Main Qwen2.5 vision tower +from .davit_aux_encoder import DavitVisionTower as DavitVisionTowerAux # Auxiliary DaViT vision tower + +def build_vision_tower(vision_tower_cfg, **kwargs): + """ + Use model config to construct the main vision tower. 
+ + vision_tower_cfg: should have attribute mm_vision_tower + Returns: instance of configured vision backbone + """ + vision_tower_name = getattr(vision_tower_cfg, 'mm_vision_tower', None) + # print(vision_tower_cfg) # Debug print of the config being used + + # Check for the Qwen2.5-VL vision model in tower name + if "qwen2.5-vl" in vision_tower_name.lower(): + return Qwen2_5_VlVisionTower(vision_tower_name, args=vision_tower_cfg, **kwargs) + + # Raise a clear error for unknown towers + raise ValueError(f'Unknown vision tower: {vision_tower_name}') + +def build_vision_tower_aux(vision_tower_cfg, **kwargs): + """ + Use model config to construct the auxiliary (helper) vision tower. + + vision_tower_cfg: should have attribute mm_vision_tower_aux + Returns: instance of configured auxiliary vision backbone + """ + vision_tower_aux = getattr(vision_tower_cfg, 'mm_vision_tower_aux', None) + # Optionally print config for debugging + # print(vision_tower_cfg) + + # Check for the DaViT auxiliary vision model in tower name + if 'davit' in vision_tower_aux.lower(): + return DavitVisionTowerAux(vision_tower_aux, args=vision_tower_cfg, **kwargs) + + # Raise a clear error if tower type is unknown + raise ValueError(f'Unknown aux vision tower: {vision_tower_aux}') diff --git a/vlm_fo1/model/multimodal_encoder/davit/configs.py b/vlm_fo1/model/multimodal_encoder/davit/configs.py new file mode 100644 index 0000000000000000000000000000000000000000..7d0dc9b07f7c29306111973544b0fed32dcd9613 --- /dev/null +++ b/vlm_fo1/model/multimodal_encoder/davit/configs.py @@ -0,0 +1,152 @@ + +model_configs = { + "davit-base": { + "depths": [ + 1, + 1, + 9, + 1 + ], + "dim_embed": [ + 128, + 256, + 512, + 1024 + ], + "drop_path_rate": 0.1, + "enable_checkpoint": True, + "image_feature_source": [ + "spatial_avg_pool", + "temporal_avg_pool" + ], + "image_pos_embed": { + "max_pos_embeddings": 50, + "type": "learned_abs_2d" + }, + "num_groups": [ + 4, + 8, + 16, + 32 + ], + "num_heads": [ + 4, + 8, + 16, + 32 + ], + "patch_padding": [ + 3, + 1, + 1, + 1 + ], + "patch_prenorm": [ + False, + True, + True, + True + ], + "patch_size": [ + 7, + 3, + 3, + 3 + ], + "patch_stride": [ + 4, + 2, + 2, + 2 + ], + "projection_dim": 768, + "transformers_version": "4.41.2", + "visual_temporal_embedding": { + "max_temporal_embeddings": 100, + "type": "COSINE" + }, + "window_size": 12 + }, + "davit-large": { + "depths": [ + 1, + 1, + 9, + 1 + ], + "dim_embed": [ + 256, + 512, + 1024, + 2048 + ], + "drop_path_rate": 0.1, + "enable_checkpoint": True, + "image_feature_source": [ + "spatial_avg_pool", + "temporal_avg_pool" + ], + "image_pos_embed": { + "max_pos_embeddings": 50, + "type": "learned_abs_2d" + }, + "num_groups": [ + 8, + 16, + 32, + 64 + ], + "num_heads": [ + 8, + 16, + 32, + 64 + ], + "patch_padding": [ + 3, + 1, + 1, + 1 + ], + "patch_prenorm": [ + False, + True, + True, + True + ], + "patch_size": [ + 7, + 3, + 3, + 3 + ], + "patch_stride": [ + 4, + 2, + 2, + 2 + ], + "projection_dim": 1024, + "transformers_version": "4.41.2", + "visual_temporal_embedding": { + "max_temporal_embeddings": 100, + "type": "COSINE" + }, + "window_size": 12 + } +} + +img_cfg = { + "do_resize": True, + "size": { + "height": 768, + "width":768 + }, + "resample": 3, + "do_center_crop": False, + "do_rescale": True, + "do_normalize": True, + "image_mean": [0.485, 0.456, 0.406], + "image_std": [0.229, 0.224, 0.225], + "do_convert_rgb": True +} diff --git a/vlm_fo1/model/multimodal_encoder/davit/configuration_davit.py 
b/vlm_fo1/model/multimodal_encoder/davit/configuration_davit.py new file mode 100644 index 0000000000000000000000000000000000000000..72d435367ef9347eabdecb00f22ea5f76a90c4a0 --- /dev/null +++ b/vlm_fo1/model/multimodal_encoder/davit/configuration_davit.py @@ -0,0 +1,119 @@ +# coding=utf-8 +# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional + +from transformers import AutoConfig +from transformers.configuration_utils import PretrainedConfig +from transformers.utils import logging + +logger = logging.get_logger(__name__) + +class DavitConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`Florence2VisionModel`]. It is used to instantiate a Florence2VisionModel + according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the Florence2VisionModel architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + drop_path_rate (`float`, *optional*, defaults to 0.1): + The dropout rate of the drop path layer. + patch_size (`List[int]`, *optional*, defaults to [7, 3, 3, 3]): + The patch size of the image. + patch_stride (`List[int]`, *optional*, defaults to [4, 2, 2, 2]): + The patch stride of the image. + patch_padding (`List[int]`, *optional*, defaults to [3, 1, 1, 1]): + The patch padding of the image. + patch_prenorm (`List[bool]`, *optional*, defaults to [false, true, true, true]): + Whether to apply layer normalization before the patch embedding layer. + enable_checkpoint (`bool`, *optional*, defaults to False): + Whether to enable checkpointing. + dim_embed (`List[int]`, *optional*, defaults to [256, 512, 1024, 2048]): + The dimension of the embedding layer. + num_heads (`List[int]`, *optional*, defaults to [8, 16, 32, 64]): + The number of attention heads. + num_groups (`List[int]`, *optional*, defaults to [8, 16, 32, 64]): + The number of groups. + depths (`List[int]`, *optional*, defaults to [1, 1, 9, 1]): + The depth of the model. + window_size (`int`, *optional*, defaults to 12): + The window size of the model. + projection_dim (`int`, *optional*, defaults to 1024): + The dimension of the projection layer. + visual_temporal_embedding (`dict`, *optional*): + The configuration of the visual temporal embedding. + image_pos_embed (`dict`, *optional*): + The configuration of the image position embedding. + image_feature_source (`List[str]`, *optional*, defaults to ["spatial_avg_pool", "temporal_avg_pool"]): + The source of the image feature. 
+ Example: + + ```python + >>> from transformers import Florence2VisionConfig, Florence2VisionModel + + >>> # Initializing a Florence2 Vision style configuration + >>> configuration = Florence2VisionConfig() + + >>> # Initializing a model (with random weights) + >>> model = Florence2VisionModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "florence2_vision" + keys_to_ignore_at_inference = ["past_key_values"] + + def __init__( + self, + drop_path_rate=0.1, + patch_size=[7, 3, 3, 3], + patch_stride=[4, 2, 2, 2], + patch_padding=[3, 1, 1, 1], + patch_prenorm=[False, True, True, True], + enable_checkpoint=False, + dim_embed=[256, 512, 1024, 2048], + num_heads=[8, 16, 32, 64], + num_groups=[8, 16, 32, 64], + depths=[1, 1, 9, 1], + window_size=12, + projection_dim=1024, + visual_temporal_embedding=None, + image_pos_embed=None, + image_feature_source=["spatial_avg_pool", "temporal_avg_pool"], + **kwargs, + ): + self.drop_path_rate = drop_path_rate + self.patch_size = patch_size + self.patch_stride = patch_stride + self.patch_padding = patch_padding + self.patch_prenorm = patch_prenorm + self.enable_checkpoint = enable_checkpoint + self.dim_embed = dim_embed + self.num_heads = num_heads + self.num_groups = num_groups + self.depths = depths + self.window_size = window_size + self.projection_dim = projection_dim + self.visual_temporal_embedding = visual_temporal_embedding + self.image_pos_embed = image_pos_embed + self.image_feature_source = image_feature_source + + super().__init__(**kwargs) + + + diff --git a/vlm_fo1/model/multimodal_encoder/davit/image_processing_clip.py b/vlm_fo1/model/multimodal_encoder/davit/image_processing_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..131ceaa07cd055bebfb0fe985378af133cb40074 --- /dev/null +++ b/vlm_fo1/model/multimodal_encoder/davit/image_processing_clip.py @@ -0,0 +1,370 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Image processor class for CLIP.""" + +from typing import Dict, List, Optional, Union + +import numpy as np + +from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict +from transformers.image_transforms import ( + convert_to_rgb, + get_resize_output_image_size, + resize, + to_channel_dimension_format, +) +from transformers.image_utils import ( + OPENAI_CLIP_MEAN, + OPENAI_CLIP_STD, + ChannelDimension, + ImageInput, + PILImageResampling, + infer_channel_dimension_format, + is_scaled_image, + make_flat_list_of_images, + to_numpy_array, + valid_images, + validate_kwargs, + validate_preprocess_arguments, +) +from transformers.utils import TensorType, is_vision_available, logging + + +logger = logging.get_logger(__name__) + + +if is_vision_available(): + import PIL + + +class CLIPImageProcessor(BaseImageProcessor): + r""" + Constructs a CLIP image processor. 
+ + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by + `do_resize` in the `preprocess` method. + size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`): + Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with + the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess` + method. + resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): + Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. + do_center_crop (`bool`, *optional*, defaults to `True`): + Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the + `preprocess` method. + crop_size (`Dict[str, int]` *optional*, defaults to 224): + Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess` + method. + do_rescale (`bool`, *optional*, defaults to `True`): + Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in + the `preprocess` method. + rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): + Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` + method. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. + image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`): + Mean to use if normalizing the image. This is a float or list of floats the length of the number of + channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. + image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`): + Standard deviation to use if normalizing the image. This is a float or list of floats the length of the + number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. + Can be overridden by the `image_std` parameter in the `preprocess` method. + do_convert_rgb (`bool`, *optional*, defaults to `True`): + Whether to convert the image to RGB. 
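+        resize_mode (`str`, *optional*, defaults to `"squash"`):
+            Resizing strategy used by this customized processor. When set to `"dynamic_square"`, `resize` picks a
+            square target size from `candidate_sizes` whose area is closest to the input image's area.
+        candidate_sizes (`List[int]`, *optional*):
+            Candidate square side lengths considered when `resize_mode` is `"dynamic_square"`.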
+ """ + + model_input_names = ["pixel_values"] + + def __init__( + self, + do_resize: bool = True, + size: Dict[str, int] = None, + resize_mode: str = "squash", + candidate_sizes: List[int] = [384, 448, 512, 576, 640, 704, 768, 832, 896, 960, 1024, 1280, 1536, 1792, 2048], #[384, 448, 512, 576, 640, 704, 768, 832, 896, 960, 1024, 1088, 1152, 1216, 1280, 1344, 1408, 1472, 1536, 1600, 1664, 1728, 1792, 1856, 1920, 1984, 2048], #[384, 448, 512, 576, 640, 704, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560], #[768, 1024, 1280, 1536, 1792, 2048] + resample: PILImageResampling = PILImageResampling.BICUBIC, + do_center_crop: bool = True, + crop_size: Dict[str, int] = None, + do_rescale: bool = True, + rescale_factor: Union[int, float] = 1 / 255, + do_normalize: bool = True, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + do_convert_rgb: bool = True, + **kwargs, + ) -> None: + super().__init__(**kwargs) + size = size if size is not None else {"shortest_edge": 224} + size = get_size_dict(size, default_to_square=False) + crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} + crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size") + + self.do_resize = do_resize + self.size = size + self.resample = resample + self.do_center_crop = do_center_crop + self.crop_size = crop_size + self.do_rescale = do_rescale + self.rescale_factor = rescale_factor + self.do_normalize = do_normalize + self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN + self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD + self.do_convert_rgb = do_convert_rgb + self._valid_processor_keys = [ + "images", + "do_resize", + "size", + "resample", + "do_center_crop", + "crop_size", + "do_rescale", + "rescale_factor", + "do_normalize", + "image_mean", + "image_std", + "do_convert_rgb", + "return_tensors", + "data_format", + "input_data_format", + ] + + # for backwards compatibility of KOSMOS-2 + if "use_square_size" in kwargs and kwargs["use_square_size"]: + self.size = {"height": size["shortest_edge"], "width": size["shortest_edge"]} + # Let's remove `use_square_size` (as it is removed from #27690), so the future Kosmos-2 image processors + # won't have this attr. being saved. (otherwise, it will enter this if branch while there is no more + # `shortest_edge` key. + delattr(self, "use_square_size") + + self.resize_mode = resize_mode + self.candidate_sizes = candidate_sizes + + def resize( + self, + image: np.ndarray, + size: Dict[str, int], + resample: PILImageResampling = PILImageResampling.BICUBIC, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge + resized to keep the input aspect ratio. + + Args: + image (`np.ndarray`): + Image to resize. + size (`Dict[str, int]`): + Size of the output image. + resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): + Resampling filter to use when resiizing the image. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format of the image. If not provided, it will be the same as the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred. 
+ """ + default_to_square = True + if "shortest_edge" in size: + size = size["shortest_edge"] + default_to_square = False + elif "height" in size and "width" in size: + size = (size["height"], size["width"]) + else: + raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.") + + if self.resize_mode == "dynamic_square": + w, h = image.shape[1], image.shape[0] + area = w * h + + # 找到最接近的目标尺寸 + target_size = self.candidate_sizes[0] + min_diff = float('inf') + + for cur_size in self.candidate_sizes: + target_area = cur_size * cur_size + diff = abs(target_area - area) + if diff < min_diff: + min_diff = diff + target_size = cur_size + size = (target_size, target_size) + + output_size = get_resize_output_image_size( + image, + size=size, + default_to_square=default_to_square, + input_data_format=input_data_format, + ) + + return resize( + image, + size=output_size, + resample=resample, + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + + def preprocess( + self, + images: ImageInput, + do_resize: bool = None, + size: Dict[str, int] = None, + resample: PILImageResampling = None, + do_center_crop: bool = None, + crop_size: int = None, + do_rescale: bool = None, + rescale_factor: float = None, + do_normalize: bool = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + do_convert_rgb: bool = None, + return_tensors: Optional[Union[str, TensorType]] = None, + data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> PIL.Image.Image: + """ + Preprocess an image or batch of images. + + Args: + images (`ImageInput`): + Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If + passing in images with pixel values between 0 and 1, set `do_rescale=False`. + do_resize (`bool`, *optional*, defaults to `self.do_resize`): + Whether to resize the image. + size (`Dict[str, int]`, *optional*, defaults to `self.size`): + Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with + the longest edge resized to keep the input aspect ratio. + resample (`int`, *optional*, defaults to `self.resample`): + Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only + has an effect if `do_resize` is set to `True`. + do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): + Whether to center crop the image. + crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): + Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. + do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): + Whether to rescale the image. + rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): + Rescale factor to rescale the image by if `do_rescale` is set to `True`. + do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): + Whether to normalize the image. + image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): + Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. + image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): + Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to + `True`. 
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): + Whether to convert the image to RGB. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + """ + do_resize = do_resize if do_resize is not None else self.do_resize + size = size if size is not None else self.size + size = get_size_dict(size, param_name="size", default_to_square=False) + resample = resample if resample is not None else self.resample + do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop + crop_size = crop_size if crop_size is not None else self.crop_size + crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True) + do_rescale = do_rescale if do_rescale is not None else self.do_rescale + rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor + do_normalize = do_normalize if do_normalize is not None else self.do_normalize + image_mean = image_mean if image_mean is not None else self.image_mean + image_std = image_std if image_std is not None else self.image_std + do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb + + validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) + + images = make_flat_list_of_images(images) + + if not valid_images(images): + raise ValueError( + "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." + ) + validate_preprocess_arguments( + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + do_center_crop=do_center_crop, + crop_size=crop_size, + do_resize=do_resize, + size=size, + resample=resample, + ) + + if do_convert_rgb: + images = [convert_to_rgb(image) for image in images] + + # All transformations expect numpy arrays. + images = [to_numpy_array(image) for image in images] + + if do_rescale and is_scaled_image(images[0]): + logger.warning_once( + "It looks like you are trying to rescale already rescaled images. If the input" + " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." 
+ ) + + if input_data_format is None: + # We assume that all images have the same channel dimension format. + input_data_format = infer_channel_dimension_format(images[0]) + + all_images = [] + for image in images: + if do_resize: + image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) + + if do_center_crop: + image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) + + if do_rescale: + image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) + + if do_normalize: + image = self.normalize( + image=image, mean=image_mean, std=image_std, input_data_format=input_data_format + ) + + all_images.append(image) + images = [ + to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) + for image in all_images + ] + + data = {"pixel_values": images} + return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["CLIPImageProcessor"] diff --git a/vlm_fo1/model/multimodal_encoder/davit/modeling_davit.py b/vlm_fo1/model/multimodal_encoder/davit/modeling_davit.py new file mode 100644 index 0000000000000000000000000000000000000000..f326007414cc77e4ca5a240e147557580f6b2407 --- /dev/null +++ b/vlm_fo1/model/multimodal_encoder/davit/modeling_davit.py @@ -0,0 +1,527 @@ +import math +import torch +import torch.utils.checkpoint +from torch import nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint +from collections import OrderedDict +from einops import rearrange +from timm.models.layers import DropPath, trunc_normal_ + +from transformers.utils import ( + logging, +) + +logger = logging.get_logger(__name__) + + + +class MySequential(nn.Sequential): + def forward(self, *inputs): + for module in self._modules.values(): + if type(inputs) == tuple: + inputs = module(*inputs) + else: + inputs = module(inputs) + return inputs + + +class PreNorm(nn.Module): + def __init__(self, norm, fn, drop_path=None): + super().__init__() + self.norm = norm + self.fn = fn + self.drop_path = drop_path + + def forward(self, x, *args, **kwargs): + shortcut = x + if self.norm != None: + x, size = self.fn(self.norm(x), *args, **kwargs) + else: + x, size = self.fn(x, *args, **kwargs) + + if self.drop_path: + x = self.drop_path(x) + + x = shortcut + x + + return x, size + + +class Mlp(nn.Module): + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.net = nn.Sequential(OrderedDict([ + ("fc1", nn.Linear(in_features, hidden_features)), + ("act", act_layer()), + ("fc2", nn.Linear(hidden_features, out_features)) + ])) + + def forward(self, x, size): + return self.net(x), size + + +class DepthWiseConv2d(nn.Module): + def __init__( + self, + dim_in, + kernel_size, + padding, + stride, + bias=True, + ): + super().__init__() + self.dw = nn.Conv2d( + dim_in, dim_in, + kernel_size=kernel_size, + padding=padding, + groups=dim_in, + stride=stride, + bias=bias + ) + + def forward(self, x, size): + B, N, C = x.shape + H, W = size + assert N == H * W + + x = self.dw(x.transpose(1, 2).view(B, C, H, W)) + size = (x.size(-2), x.size(-1)) + x = x.flatten(2).transpose(1, 2) + return x, size + + +class ConvEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__( + self, + patch_size=7, + in_chans=3, + embed_dim=64, + stride=4, + padding=2, + norm_layer=None, + pre_norm=True + ): + 
super().__init__() + self.patch_size = patch_size + + self.proj = nn.Conv2d( + in_chans, embed_dim, + kernel_size=patch_size, + stride=stride, + padding=padding + ) + + dim_norm = in_chans if pre_norm else embed_dim + self.norm = norm_layer(dim_norm) if norm_layer else None + + self.pre_norm = pre_norm + + def forward(self, x, size): + H, W = size + if len(x.size()) == 3: + if self.norm and self.pre_norm: + x = self.norm(x) + x = rearrange( + x, 'b (h w) c -> b c h w', + h=H, w=W + ) + + x = self.proj(x) + + _, _, H, W = x.shape + x = rearrange(x, 'b c h w -> b (h w) c') + if self.norm and not self.pre_norm: + x = self.norm(x) + + return x, (H, W) + + +class ChannelAttention(nn.Module): + + def __init__(self, dim, groups=8, qkv_bias=True): + super().__init__() + + self.groups = groups + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.proj = nn.Linear(dim, dim) + + def forward(self, x, size): + B, N, C = x.shape + + qkv = self.qkv(x).reshape(B, N, 3, self.groups, C // self.groups).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + + q = q * (float(N) ** -0.5) + attention = q.transpose(-1, -2) @ k + attention = attention.softmax(dim=-1) + x = (attention @ v.transpose(-1, -2)).transpose(-1, -2) + x = x.transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + return x, size + + +class ChannelBlock(nn.Module): + + def __init__(self, dim, groups, mlp_ratio=4., qkv_bias=True, + drop_path_rate=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, + conv_at_attn=True, conv_at_ffn=True): + super().__init__() + + drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() + + self.conv1 = PreNorm(None, DepthWiseConv2d(dim, 3, 1, 1)) if conv_at_attn else None + self.channel_attn = PreNorm( + norm_layer(dim), + ChannelAttention(dim, groups=groups, qkv_bias=qkv_bias), + drop_path + ) + self.conv2 = PreNorm(None, DepthWiseConv2d(dim, 3, 1, 1)) if conv_at_ffn else None + self.ffn = PreNorm( + norm_layer(dim), + Mlp(in_features=dim, hidden_features=int(dim*mlp_ratio), act_layer=act_layer), + drop_path + ) + + def forward(self, x, size): + if self.conv1: + x, size = self.conv1(x, size) + x, size = self.channel_attn(x, size) + + if self.conv2: + x, size = self.conv2(x, size) + x, size = self.ffn(x, size) + + return x, size + + +def window_partition(x, window_size: int): + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows + + +def window_reverse(windows, batch_size: int, window_size: int, H: int, W: int): + B = batch_size + # this will cause onnx conversion failed for dynamic axis, because treated as constant + # int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class WindowAttention(nn.Module): + def __init__(self, dim, num_heads, window_size, qkv_bias=True): + + super().__init__() + self.dim = dim + self.window_size = window_size + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = float(head_dim) ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.proj = nn.Linear(dim, dim) + + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, size): + + H, W = size + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + + x = x.view(B, H, W, C) + + pad_l = pad_t = 0 + pad_r = (self.window_size - W 
% self.window_size) % self.window_size + pad_b = (self.window_size - H % self.window_size) % self.window_size + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + _, Hp, Wp, _ = x.shape + + x = window_partition(x, self.window_size) + x = x.view(-1, self.window_size * self.window_size, C) + + # W-MSA/SW-MSA + # attn_windows = self.attn(x_windows) + + B_, N, C = x.shape + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + attn = self.softmax(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + + # merge windows + x = x.view( + -1, self.window_size, self.window_size, C + ) + x = window_reverse(x, B, self.window_size, Hp, Wp) + + if pad_r > 0 or pad_b > 0: + x = x[:, :H, :W, :].contiguous() + + x = x.view(B, H * W, C) + + return x, size + + +class SpatialBlock(nn.Module): + + def __init__(self, dim, num_heads, window_size, + mlp_ratio=4., qkv_bias=True, drop_path_rate=0., act_layer=nn.GELU, + norm_layer=nn.LayerNorm, conv_at_attn=True, conv_at_ffn=True): + super().__init__() + + drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() + + self.conv1 = PreNorm(None, DepthWiseConv2d(dim, 3, 1, 1)) if conv_at_attn else None + self.window_attn = PreNorm( + norm_layer(dim), + WindowAttention(dim, num_heads, window_size, qkv_bias=qkv_bias), + drop_path + ) + self.conv2 = PreNorm(None, DepthWiseConv2d(dim, 3, 1, 1)) if conv_at_ffn else None + self.ffn = PreNorm( + norm_layer(dim), + Mlp(in_features=dim, hidden_features=int(dim*mlp_ratio), act_layer=act_layer), + drop_path + ) + + def forward(self, x, size): + if self.conv1: + x, size = self.conv1(x, size) + x, size = self.window_attn(x, size) + + if self.conv2: + x, size = self.conv2(x, size) + x, size = self.ffn(x, size) + return x, size + + +class DaViT(nn.Module): + """ DaViT: Dual-Attention Transformer + + Args: + in_chans (int): Number of input image channels. Default: 3. + num_classes (int): Number of classes for classification head. Default: 1000. + patch_size (tuple(int)): Patch size of convolution in different stages. Default: (7, 2, 2, 2). + patch_stride (tuple(int)): Patch stride of convolution in different stages. Default: (4, 2, 2, 2). + patch_padding (tuple(int)): Patch padding of convolution in different stages. Default: (3, 0, 0, 0). + patch_prenorm (tuple(bool)): If True, perform norm before convlution layer. Default: (True, False, False, False). + embed_dims (tuple(int)): Patch embedding dimension in different stages. Default: (64, 128, 192, 256). + num_heads (tuple(int)): Number of spatial attention heads in different stages. Default: (4, 8, 12, 16). + num_groups (tuple(int)): Number of channel groups in different stages. Default: (4, 8, 12, 16). + window_size (int): Window size. Default: 7. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True. + drop_path_rate (float): Stochastic depth rate. Default: 0.1. + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + enable_checkpoint (bool): If True, enable checkpointing. Default: False. + conv_at_attn (bool): If True, performe depthwise convolution before attention layer. Default: True. + conv_at_ffn (bool): If True, performe depthwise convolution before ffn layer. Default: True. 
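A minimal instantiation sketch for the arguments documented above (illustrative only: the embed_dims / num_heads / num_groups values below are picked so that every stage dimension divides evenly by its head and group counts, and are not the defaults listed here):

```python
>>> import torch
>>> from vlm_fo1.model.multimodal_encoder.davit.modeling_davit import DaViT
>>> model = DaViT(embed_dims=(96, 192, 384, 768), num_heads=(3, 6, 12, 24), num_groups=(3, 6, 12, 24)).eval()
>>> out = model(torch.randn(1, 3, 224, 224))
>>> [tuple(f.shape) for f in out["image_features"]]   # one feature map per stage
[(1, 96, 56, 56), (1, 192, 28, 28), (1, 384, 14, 14), (1, 768, 7, 7)]
>>> out["last_feat"].shape
torch.Size([1, 768, 7, 7])
```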
+ """ + + def __init__( + self, + in_chans=3, + num_classes=1000, + depths=(1, 1, 3, 1), + patch_size=(7, 2, 2, 2), + patch_stride=(4, 2, 2, 2), + patch_padding=(3, 0, 0, 0), + patch_prenorm=(False, False, False, False), + embed_dims=(64, 128, 192, 256), + num_heads=(3, 6, 12, 24), + num_groups=(3, 6, 12, 24), + window_size=7, + mlp_ratio=4., + qkv_bias=True, + drop_path_rate=0.1, + norm_layer=nn.LayerNorm, + enable_checkpoint=False, + conv_at_attn=True, + conv_at_ffn=True + ): + super().__init__() + + self.num_classes = num_classes + self.embed_dims = embed_dims + self.num_heads = num_heads + self.num_groups = num_groups + self.num_stages = len(self.embed_dims) + self.enable_checkpoint = enable_checkpoint + assert self.num_stages == len(self.num_heads) == len(self.num_groups) + + num_stages = len(embed_dims) + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)*2)] + + depth_offset = 0 + convs = [] + blocks = [] + for i in range(num_stages): + conv_embed = ConvEmbed( + patch_size=patch_size[i], + stride=patch_stride[i], + padding=patch_padding[i], + in_chans=in_chans if i == 0 else self.embed_dims[i - 1], + embed_dim=self.embed_dims[i], + norm_layer=norm_layer, + pre_norm=patch_prenorm[i] + ) + convs.append(conv_embed) + + block = MySequential( + *[ + MySequential(OrderedDict([ + ( + 'spatial_block', SpatialBlock( + embed_dims[i], + num_heads[i], + window_size, + drop_path_rate=dpr[depth_offset+j*2], + qkv_bias=qkv_bias, + mlp_ratio=mlp_ratio, + conv_at_attn=conv_at_attn, + conv_at_ffn=conv_at_ffn, + ) + ), + ( + 'channel_block', ChannelBlock( + embed_dims[i], + num_groups[i], + drop_path_rate=dpr[depth_offset+j*2+1], + qkv_bias=qkv_bias, + mlp_ratio=mlp_ratio, + conv_at_attn=conv_at_attn, + conv_at_ffn=conv_at_ffn, + ) + ) + ])) for j in range(depths[i]) + ] + ) + blocks.append(block) + depth_offset += depths[i]*2 + + self.convs = nn.ModuleList(convs) + self.blocks = nn.ModuleList(blocks) + + # self.norms = norm_layer(self.embed_dims[-1]) + # self.avgpool = nn.AdaptiveAvgPool1d(1) + # self.head = nn.Linear(self.embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity() + + self.apply(self._init_weights) + + @property + def dim_out(self): + return self.embed_dims[-1] + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=0.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Conv2d): + nn.init.normal_(m.weight, std=0.02) + for name, _ in m.named_parameters(): + if name in ['bias']: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.weight, 1.0) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1.0) + nn.init.constant_(m.bias, 0) + + def forward_features_unpool(self, x): + """ + forward until avg pooling + Args: + x (_type_): input image tensor + """ + input_size = (x.size(2), x.size(3)) + for conv, block in zip(self.convs, self.blocks): + x, input_size = conv(x, input_size) + if self.enable_checkpoint: + x, input_size = checkpoint.checkpoint(block, x, input_size) + else: + x, input_size = block(x, input_size) + return x + + # def forward_features(self, x): + # x = self.forward_features_unpool(x) + + # # (batch_size, num_tokens, token_dim) + # x = self.avgpool(x.transpose(1, 2)) + # # (batch_size, 1, num_tokens) + # x = torch.flatten(x, 1) + # x = self.norms(x) + + # return x + + def forward_features(self, x): + """ + forward until avg pooling + Args: + x (_type_): input image tensor + """ + outs = [] + 
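# One (B, C, H, W) feature map per stage is collected here; see the dict returned below. +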
input_size = (x.size(2), x.size(3)) + for i, (conv, block) in enumerate(zip(self.convs, self.blocks)): + x, input_size = conv(x, input_size) + if self.enable_checkpoint and self.training: + x, input_size = checkpoint.checkpoint(block, x, input_size, use_reentrant=False) + else: + x, input_size = block(x, input_size) + H, W = input_size + x_out = rearrange(x, 'b (h w) c -> b c h w', h=H, w=W) + outs.append(x_out) + + # if i in self._out_features: + # norm_layer = getattr(self, f'norm{i}') + # x_out = norm_layer(x) + # H, W = input_size + # x_out = rearrange(x_out, 'b (h w) c -> b c h w', h=H, w=W) + # outs.append(x_out) + + return { + "image_features": outs, + "last_feat": outs[-1], + } + + def forward(self, x): + x = self.forward_features(x) + # x = self.head(x) + return x + + @classmethod + def from_config(cls, config, enable_checkpoint=False): + return cls( + depths=config.depths, + embed_dims=config.dim_embed, + num_heads=config.num_heads, + num_groups=config.num_groups, + patch_size=config.patch_size, + patch_stride=config.patch_stride, + patch_padding=config.patch_padding, + patch_prenorm=config.patch_prenorm, + drop_path_rate=config.drop_path_rate, + window_size=config.window_size, + enable_checkpoint=enable_checkpoint + ) diff --git a/vlm_fo1/model/multimodal_encoder/davit_aux_encoder.py b/vlm_fo1/model/multimodal_encoder/davit_aux_encoder.py new file mode 100755 index 0000000000000000000000000000000000000000..63883f3202072e2e5b3e9b09a274e0baf406b905 --- /dev/null +++ b/vlm_fo1/model/multimodal_encoder/davit_aux_encoder.py @@ -0,0 +1,98 @@ +from vlm_fo1.model.multimodal_encoder.base_encoder import AbsVisionTower +from vlm_fo1.model.multimodal_encoder.davit.configuration_davit import DavitConfig +from vlm_fo1.model.multimodal_encoder.davit.configs import model_configs, img_cfg +from vlm_fo1.model.multimodal_encoder.davit.modeling_davit import DaViT +from vlm_fo1.model.multimodal_encoder.davit.image_processing_clip import CLIPImageProcessor + +# Auxiliary DaViT-based vision tower for multi-modal encoder framework. +# This class manages configuration, processing, and dynamic instantiation of DaViT models. +class DavitVisionTower(AbsVisionTower): + def __init__(self, vision_tower_name, args, delay_load=False, image_size=768, aspect_ratio='squash'): + """ + Args: + vision_tower_name: Identifier string for model variant (usually a file name or config section). + args: Parent MM model/global config (currently ignored). + delay_load: If True, only config is loaded, not the weights/model (for e.g., lazy instantiation). + image_size: Target size to which images are resized (unless aspect_ratio=='dynamic'). + aspect_ratio: Controls how input aspect ratio is handled ('squash', 'dynamic', etc.). + """ + super().__init__() + self.is_loaded = False + self.vision_tower_name = vision_tower_name + self.aspect_ratio = aspect_ratio + self.image_size = image_size + + # In this implementation, training flag is ignored (always uses pretrained weights). 
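+ # Note: delay_load currently has no effect; the lazy-config branch below is commented out and load_model() is always invoked.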
+ is_train = False + # if not delay_load: + # self.load_model(is_train, self.image_size, self.aspect_ratio) + # else: + # # Only load/prepare configuration (not model weights or modules) + # cfg_dict = model_configs[self.vision_tower_name.split('/')[-1].replace('.pth', '')] + # vision_cfg = DavitConfig.from_dict(cfg_dict) + # vision_cfg.image_size = image_size + # self.cfg_only = vision_cfg + self.load_model(is_train, self.image_size, self.aspect_ratio) + + def load_model(self, is_train=False, image_size=768, aspect_ratio='squash'): + """ + Actually loads the DaViT model (with weights) and its image processor. + Sets up resizing/aspect handling as needed. + """ + cfg_dict = model_configs[self.vision_tower_name.split('/')[-1].replace('.pth', '')] + vision_cfg = DavitConfig.from_dict(cfg_dict) + vision_cfg.image_size = image_size + self.image_tower = DaViT.from_config(config=vision_cfg, enable_checkpoint=True) + self.image_tower.config = vision_cfg + img_cfg['resize_mode'] = aspect_ratio + # If using 'dynamic' aspect ratio, disable resizing for the processor + if aspect_ratio == 'dynamic': # dynamic aspect ratio means no resizing, use the original image size, and the image_size parameter is not used + img_cfg['do_resize'] = False + self.image_processor = CLIPImageProcessor(**img_cfg) + + self.is_loaded = True + + def forward(self, images): + """ + Runs the auxiliary DaViT encoder. + Args: + images: Torch tensor, or list of tensors, of images to encode. + Returns: + List of image feature outputs (typically 4-stage outputs per image). + """ + # If input is a list of images, encode each separately. + if type(images) is list: + image_features = [] + for image in images: + # Forward pass: returns 4-stage outputs; caller must handle downstream selection/merging. + image_features.append(self.image_tower.forward(image.to(device=self.device, dtype=self.dtype))) # this returns 4 stage output + return image_features + else: + # Single image: compute features, return as a length-1 list for consistency. + # image_features = self.image_tower.forward(images.to(device=self.device, dtype=self.dtype)) # this returns 4 stage output + # return [image_features] # return the last layer for now + raise NotImplementedError + + @property + def dtype(self): + # Expose main tensor dtype to external utilities (e.g., for caller to move data to right dtype). + return self.image_tower.convs[0].proj.weight.dtype + + @property + def device(self): + # Expose main parameter device so inputs and other dependent modules use matching device. + return self.image_tower.convs[0].proj.weight.device + + @property + def config(self): + # Get configuration in loaded or 'config only' state + if self.is_loaded: + return self.image_tower.config + else: + return self.cfg_only + + @property + def hidden_size(self): + # Hidden size: sum of embedding dims (all multi-stage outputs). 
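A usage sketch for the wrapper above, assuming the vendored CLIPImageProcessor keeps the standard Hugging Face `__call__` interface; the `"checkpoints/davit_base.pth"` key and the image path are placeholders, not names confirmed by this diff:

```python
import torch
from PIL import Image
from vlm_fo1.model.multimodal_encoder.davit_aux_encoder import DavitVisionTower

# The tower resolves its config via model_configs[<basename without ".pth">];
# "davit_base" is an assumed key here, chosen for illustration only.
tower = DavitVisionTower("checkpoints/davit_base.pth", args=None, image_size=768)
tower.image_tower.eval()

image = Image.open("example.jpg").convert("RGB")  # placeholder path
pixel_values = tower.image_processor(images=image, return_tensors="pt")["pixel_values"]

with torch.no_grad():
    # forward() expects a list of image tensors and returns one entry per image.
    outs = tower([pixel_values.to(tower.device, tower.dtype)])

stage_maps = outs[0]["image_features"]          # four (B, C_i, H_i, W_i) maps, one per DaViT stage
assert sum(m.shape[1] for m in stage_maps) == tower.hidden_size
```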
+ return sum(self.image_tower.embed_dims) + diff --git a/vlm_fo1/model/multimodal_encoder/qwen2_5_vl/__init__.py b/vlm_fo1/model/multimodal_encoder/qwen2_5_vl/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vlm_fo1/model/multimodal_encoder/qwen2_5_vl/configuration_qwen2_5_vl.py b/vlm_fo1/model/multimodal_encoder/qwen2_5_vl/configuration_qwen2_5_vl.py new file mode 100644 index 0000000000000000000000000000000000000000..aeaba127cc1f83b94c5c88683e040a1c342aadca --- /dev/null +++ b/vlm_fo1/model/multimodal_encoder/qwen2_5_vl/configuration_qwen2_5_vl.py @@ -0,0 +1,258 @@ +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +# This file was automatically generated from src/transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py. +# Do NOT edit this file manually as any edits will be overwritten by the generation of +# the file from the modular. If any change should be done, please apply the change to the +# modular_qwen2_5_vl.py file directly. One of our CI enforces this. +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +# coding=utf-8 +# Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from transformers.configuration_utils import PretrainedConfig +from transformers.modeling_rope_utils import rope_config_validation + + +class Qwen2_5_VLVisionConfig(PretrainedConfig): + model_type = "qwen2_5_vl" + base_config_key = "vision_config" + + def __init__( + self, + depth=32, + hidden_size=3584, + hidden_act="silu", + intermediate_size=3420, + num_heads=16, + in_channels=3, + patch_size=14, + spatial_merge_size=2, + temporal_patch_size=2, + tokens_per_second=4, + window_size=112, + out_hidden_size=3584, + fullatt_block_indexes=[7, 15, 23, 31], + **kwargs, + ): + super().__init__(**kwargs) + + self.depth = depth + self.hidden_size = hidden_size + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.num_heads = num_heads + self.in_channels = in_channels + self.patch_size = patch_size + self.spatial_merge_size = spatial_merge_size + self.temporal_patch_size = temporal_patch_size + self.tokens_per_second = tokens_per_second + self.window_size = window_size + self.fullatt_block_indexes = fullatt_block_indexes + self.out_hidden_size = out_hidden_size + + +class Qwen2_5_VLConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`Qwen2_5_VLModel`]. It is used to instantiate a + Qwen2-VL model according to the specified arguments, defining the model architecture. 
Instantiating a configuration + with the defaults will yield a similar configuration to that of + Qwen2-VL-7B-Instruct [Qwen/Qwen2-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct). + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 152064): + Vocabulary size of the Qwen2_5_VL model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`Qwen2_5_VLModel`] + hidden_size (`int`, *optional*, defaults to 8192): + Dimension of the hidden representations. + intermediate_size (`int`, *optional*, defaults to 29568): + Dimension of the MLP representations. + num_hidden_layers (`int`, *optional*, defaults to 80): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 64): + Number of attention heads for each attention layer in the Transformer encoder. + num_key_value_heads (`int`, *optional*, defaults to 8): + This is the number of key_value heads that should be used to implement Grouped Query Attention. If + `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if + `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When + converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed + by meanpooling all the original heads within that group. For more details checkout [this + paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`. + hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): + The non-linear activation function (function or string) in the decoder. + max_position_embeddings (`int`, *optional*, defaults to 32768): + The maximum sequence length that this model might ever be used with. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + rms_norm_eps (`float`, *optional*, defaults to 1e-05): + The epsilon used by the rms normalization layers. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + tie_word_embeddings (`bool`, *optional*, defaults to `False`): + Whether the model's input and output word embeddings should be tied. + rope_theta (`float`, *optional*, defaults to 1000000.0): + The base period of the RoPE embeddings. + use_sliding_window (`bool`, *optional*, defaults to `False`): + Whether to use sliding window attention. + sliding_window (`int`, *optional*, defaults to 4096): + Sliding window attention (SWA) window size. If not specified, will default to `4096`. + max_window_layers (`int`, *optional*, defaults to 80): + The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + vision_config (`Dict`, *optional*): + The config for the visual encoder initialization. + rope_scaling (`Dict`, *optional*): + Dictionary containing the scaling configuration for the RoPE embeddings. 
NOTE: if you apply new rope type + and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value + accordingly. + Expected contents: + `rope_type` (`str`): + The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', + 'llama3'], with 'default' being the original RoPE implementation. + `factor` (`float`, *optional*): + Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In + most scaling types, a `factor` of x will enable the model to handle sequences of length x * + original maximum pre-trained length. + `original_max_position_embeddings` (`int`, *optional*): + Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during + pretraining. + `attention_factor` (`float`, *optional*): + Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention + computation. If unspecified, it defaults to value recommended by the implementation, using the + `factor` field to infer the suggested value. + `beta_fast` (`float`, *optional*): + Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear + ramp function. If unspecified, it defaults to 32. + `beta_slow` (`float`, *optional*): + Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear + ramp function. If unspecified, it defaults to 1. + `short_factor` (`List[float]`, *optional*): + Only used with 'longrope'. The scaling factor to be applied to short contexts (< + `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden + size divided by the number of attention heads divided by 2 + `long_factor` (`List[float]`, *optional*): + Only used with 'longrope'. The scaling factor to be applied to long contexts (< + `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden + size divided by the number of attention heads divided by 2 + `low_freq_factor` (`float`, *optional*): + Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE + `high_freq_factor` (`float`, *optional*): + Only used with 'llama3'. 
Scaling factor applied to high frequency components of the RoPE + + ```python + >>> from transformers import Qwen2_5_VLForConditionalGeneration, Qwen2_5_VLConfig + + >>> # Initializing a Qwen2_5_VL style configuration + >>> configuration = Qwen2_5_VLConfig() + + >>> # Initializing a model from the Qwen2-VL-7B style configuration + >>> model = Qwen2_5_VLForConditionalGeneration(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "qwen2_5_vl" + sub_configs = {"vision_config": Qwen2_5_VLVisionConfig} + keys_to_ignore_at_inference = ["past_key_values"] + # Default tensor parallel plan for base model `Qwen2_5_VL` + base_model_tp_plan = { + "layers.*.self_attn.q_proj": "colwise", + "layers.*.self_attn.k_proj": "colwise", + "layers.*.self_attn.v_proj": "colwise", + "layers.*.self_attn.o_proj": "rowwise", + "layers.*.mlp.gate_proj": "colwise", + "layers.*.mlp.up_proj": "colwise", + "layers.*.mlp.down_proj": "rowwise", + } + base_model_pp_plan = { + "embed_tokens": (["input_ids"], ["inputs_embeds"]), + "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), + "norm": (["hidden_states"], ["hidden_states"]), + } + + def __init__( + self, + vocab_size=152064, + hidden_size=8192, + intermediate_size=29568, + num_hidden_layers=80, + num_attention_heads=64, + num_key_value_heads=8, + hidden_act="silu", + max_position_embeddings=32768, + initializer_range=0.02, + rms_norm_eps=1e-05, + use_cache=True, + tie_word_embeddings=False, + rope_theta=1000000.0, + use_sliding_window=False, + sliding_window=4096, + max_window_layers=80, + attention_dropout=0.0, + vision_config=None, + rope_scaling=None, + **kwargs, + ): + if isinstance(vision_config, dict): + self.vision_config = self.sub_configs["vision_config"](**vision_config) + elif vision_config is None: + self.vision_config = self.sub_configs["vision_config"]() + + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.use_sliding_window = use_sliding_window + self.sliding_window = sliding_window + self.max_window_layers = max_window_layers + + # for backward compatibility + if num_key_value_heads is None: + num_key_value_heads = num_attention_heads + + self.num_key_value_heads = num_key_value_heads + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.rms_norm_eps = rms_norm_eps + self.use_cache = use_cache + self.rope_theta = rope_theta + self.attention_dropout = attention_dropout + self.rope_scaling = rope_scaling + + # Validate the correctness of rotary position embeddings parameters + # BC: if there is a 'type' field, move it to 'rope_type'. + # and change type from 'mrope' to 'default' because `mrope` does default RoPE calculations + # one can set it to "linear"/"dynamic" etc. 
to have scaled RoPE + # TODO: @raushan update config in the hub + if self.rope_scaling is not None and "type" in self.rope_scaling: + if self.rope_scaling["type"] == "mrope": + self.rope_scaling["type"] = "default" + self.rope_scaling["rope_type"] = self.rope_scaling["type"] + rope_config_validation(self, ignore_keys={"mrope_section"}) + + super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs) + + +__all__ = ["Qwen2_5_VLConfig"] \ No newline at end of file diff --git a/vlm_fo1/model/multimodal_encoder/qwen2_5_vl/modeling_qwen2_5_vl.py b/vlm_fo1/model/multimodal_encoder/qwen2_5_vl/modeling_qwen2_5_vl.py new file mode 100644 index 0000000000000000000000000000000000000000..e1de8197edf489fbb94e5c48b9985b69ca66666b --- /dev/null +++ b/vlm_fo1/model/multimodal_encoder/qwen2_5_vl/modeling_qwen2_5_vl.py @@ -0,0 +1,2072 @@ +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +# This file was automatically generated from src/transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py. +# Do NOT edit this file manually as any edits will be overwritten by the generation of +# the file from the modular. If any change should be done, please apply the change to the +# modular_qwen2_5_vl.py file directly. One of our CI enforces this. +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +# coding=utf-8 +# Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
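As a side note, the rope_scaling backward-compatibility branch at the end of `Qwen2_5_VLConfig.__init__` above can be exercised in isolation; the `mrope_section` value below is only an illustration:

```python
from vlm_fo1.model.multimodal_encoder.qwen2_5_vl.configuration_qwen2_5_vl import Qwen2_5_VLConfig

# A legacy dict using "type": "mrope" is rewritten in place: "mrope" becomes "default"
# and the value is mirrored into the newer "rope_type" key before validation runs.
cfg = Qwen2_5_VLConfig(rope_scaling={"type": "mrope", "mrope_section": [16, 24, 24]})
print(cfg.rope_scaling)
# {'type': 'default', 'mrope_section': [16, 24, 24], 'rope_type': 'default'}
```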
+ +import math +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn import CrossEntropyLoss + +from transformers.activations import ACT2FN +from transformers.cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache +from transformers.generation import GenerationMixin +from transformers.modeling_attn_mask_utils import AttentionMaskConverter +from transformers.modeling_outputs import BaseModelOutputWithPast, ModelOutput +from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import ( + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_flash_attn_2_available, + is_flash_attn_greater_or_equal_2_10, + logging, + replace_return_docstrings, +) +from .configuration_qwen2_5_vl import Qwen2_5_VLConfig, Qwen2_5_VLVisionConfig + + +if is_flash_attn_2_available(): + from flash_attn import flash_attn_varlen_func + from flash_attn.layers.rotary import apply_rotary_emb + +else: + flash_attn_varlen_func = None + apply_rotary_emb = None + + +if is_flash_attn_2_available(): + from transformers.modeling_flash_attention_utils import _flash_attention_forward +else: + flash_attn_varlen_func = None + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "Qwen2_5_VLConfig" + + +class Qwen2_5_VLMLP(nn.Module): + def __init__(self, config, bias: bool = False): + super().__init__() + self.hidden_size = config.hidden_size + self.intermediate_size = config.intermediate_size + self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=bias) + self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=bias) + self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=bias) + self.act_fn = ACT2FN[config.hidden_act] + + def forward(self, hidden_state): + return self.down_proj(self.act_fn(self.gate_proj(hidden_state)) * self.up_proj(hidden_state)) + + +class Qwen2_5_VisionPatchEmbed(nn.Module): + def __init__( + self, + patch_size: int = 14, + temporal_patch_size: int = 2, + in_channels: int = 3, + embed_dim: int = 1152, + ) -> None: + super().__init__() + self.patch_size = patch_size + self.temporal_patch_size = temporal_patch_size + self.in_channels = in_channels + self.embed_dim = embed_dim + + kernel_size = [temporal_patch_size, patch_size, patch_size] + self.proj = nn.Conv3d(in_channels, embed_dim, kernel_size=kernel_size, stride=kernel_size, bias=False) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + target_dtype = self.proj.weight.dtype + hidden_states = hidden_states.view( + -1, self.in_channels, self.temporal_patch_size, self.patch_size, self.patch_size + ) + hidden_states = self.proj(hidden_states.to(dtype=target_dtype)).view(-1, self.embed_dim) + return hidden_states + + +class Qwen2_5_VisionRotaryEmbedding(nn.Module): + def __init__(self, dim: int, theta: float = 10000.0) -> None: + super().__init__() + inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + def forward(self, seqlen: int) -> torch.Tensor: + seq = torch.arange(seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype) + freqs = torch.outer(seq, self.inv_freq) + return freqs + + +class Qwen2RMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + Qwen2RMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + 
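# Learnable per-channel scale; RMSNorm applies no mean subtraction and no bias term (see forward below). +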
self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + return self.weight * hidden_states.to(input_dtype) + + def extra_repr(self): + return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" + + +class Qwen2_5_VLPatchMerger(nn.Module): + def __init__(self, dim: int, context_dim: int, spatial_merge_size: int = 2) -> None: + super().__init__() + self.hidden_size = context_dim * (spatial_merge_size**2) + self.ln_q = Qwen2RMSNorm(context_dim, eps=1e-6) + self.mlp = nn.Sequential( + nn.Linear(self.hidden_size, self.hidden_size), + nn.GELU(), + nn.Linear(self.hidden_size, dim), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.mlp(self.ln_q(x).view(-1, self.hidden_size)) + return x + + +def apply_rotary_pos_emb_flashatt( + q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor +) -> Tuple[torch.Tensor, torch.Tensor]: + cos = cos.chunk(2, dim=-1)[0].contiguous() + sin = sin.chunk(2, dim=-1)[0].contiguous() + q_embed = apply_rotary_emb(q.float(), cos.float(), sin.float()).type_as(q) + k_embed = apply_rotary_emb(k.float(), cos.float(), sin.float()).type_as(k) + return q_embed, k_embed + + +class Qwen2_5_VLVisionFlashAttention2(nn.Module): + def __init__(self, dim: int, num_heads: int = 16) -> None: + super().__init__() + self.num_heads = num_heads + self.qkv = nn.Linear(dim, dim * 3, bias=True) + self.proj = nn.Linear(dim, dim) + + def forward( + self, + hidden_states: torch.Tensor, + cu_seqlens: torch.Tensor, + rotary_pos_emb: Optional[torch.Tensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + ) -> torch.Tensor: + seq_length = hidden_states.shape[0] + q, k, v = self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0) + if position_embeddings is None: + logger.warning_once( + "The attention layers in this model are transitioning from computing the RoPE embeddings internally " + "through `rotary_pos_emb` (2D tensor of RoPE theta values), to using externally computed " + "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.54 `rotary_pos_emb` will be " + "removed and `position_embeddings` will be mandatory." 
+ ) + emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1) + cos = emb.cos() + sin = emb.sin() + else: + cos, sin = position_embeddings + q, k = apply_rotary_pos_emb_flashatt(q.unsqueeze(0), k.unsqueeze(0), cos, sin) + q = q.squeeze(0) + k = k.squeeze(0) + + max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item() + attn_output = flash_attn_varlen_func(q, k, v, cu_seqlens, cu_seqlens, max_seqlen, max_seqlen).reshape( + seq_length, -1 + ) + attn_output = self.proj(attn_output) + return attn_output + + +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +def apply_rotary_pos_emb_vision( + q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor +) -> Tuple[torch.Tensor, torch.Tensor]: + orig_q_dtype = q.dtype + orig_k_dtype = k.dtype + q, k = q.float(), k.float() + cos, sin = cos.unsqueeze(-2).float(), sin.unsqueeze(-2).float() + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + q_embed = q_embed.to(orig_q_dtype) + k_embed = k_embed.to(orig_k_dtype) + return q_embed, k_embed + + +class Qwen2_5_VLVisionAttention(nn.Module): + def __init__(self, dim: int, num_heads: int = 16) -> None: + super().__init__() + self.num_heads = num_heads + self.head_dim = dim // num_heads + self.qkv = nn.Linear(dim, dim * 3, bias=True) + self.proj = nn.Linear(dim, dim) + + def forward( + self, + hidden_states: torch.Tensor, + cu_seqlens: torch.Tensor, + rotary_pos_emb: Optional[torch.Tensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + ) -> torch.Tensor: + seq_length = hidden_states.shape[0] + q, k, v = self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0) + if position_embeddings is None: + logger.warning_once( + "The attention layers in this model are transitioning from computing the RoPE embeddings internally " + "through `rotary_pos_emb` (2D tensor of RoPE theta values), to using externally computed " + "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.54 `rotary_pos_emb` will be " + "removed and `position_embeddings` will be mandatory." 
+ ) + emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1) + cos = emb.cos() + sin = emb.sin() + else: + cos, sin = position_embeddings + q, k = apply_rotary_pos_emb_vision(q, k, cos, sin) + + attention_mask = torch.full( + [1, seq_length, seq_length], torch.finfo(q.dtype).min, device=q.device, dtype=q.dtype + ) + for i in range(1, len(cu_seqlens)): + attention_mask[..., cu_seqlens[i - 1] : cu_seqlens[i], cu_seqlens[i - 1] : cu_seqlens[i]] = 0 + + q = q.transpose(0, 1) + k = k.transpose(0, 1) + v = v.transpose(0, 1) + attn_weights = torch.matmul(q, k.transpose(1, 2)) / math.sqrt(self.head_dim) + attn_weights = attn_weights + attention_mask + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(q.dtype) + attn_output = torch.matmul(attn_weights, v) + attn_output = attn_output.transpose(0, 1) + attn_output = attn_output.reshape(seq_length, -1) + attn_output = self.proj(attn_output) + return attn_output + + +class Qwen2_5_VLVisionSdpaAttention(nn.Module): + def __init__(self, dim: int, num_heads: int = 16) -> None: + super().__init__() + self.num_heads = num_heads + self.qkv = nn.Linear(dim, dim * 3, bias=True) + self.proj = nn.Linear(dim, dim) + + def forward( + self, + hidden_states: torch.Tensor, + cu_seqlens: torch.Tensor, + rotary_pos_emb: Optional[torch.Tensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + ) -> torch.Tensor: + seq_length = hidden_states.shape[0] + q, k, v = self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0) + if position_embeddings is None: + logger.warning_once( + "The attention layers in this model are transitioning from computing the RoPE embeddings internally " + "through `rotary_pos_emb` (2D tensor of RoPE theta values), to using externally computed " + "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.54 `rotary_pos_emb` will be " + "removed and `position_embeddings` will be mandatory." 
+ ) + emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1) + cos = emb.cos() + sin = emb.sin() + else: + cos, sin = position_embeddings + q, k = apply_rotary_pos_emb_vision(q, k, cos, sin) + + attention_mask = torch.zeros([1, seq_length, seq_length], device=q.device, dtype=torch.bool) + for i in range(1, len(cu_seqlens)): + attention_mask[..., cu_seqlens[i - 1] : cu_seqlens[i], cu_seqlens[i - 1] : cu_seqlens[i]] = True + q = q.transpose(0, 1) + k = k.transpose(0, 1) + v = v.transpose(0, 1) + attn_output = F.scaled_dot_product_attention(q, k, v, attention_mask, dropout_p=0.0) + attn_output = attn_output.transpose(0, 1) + attn_output = attn_output.reshape(seq_length, -1) + attn_output = self.proj(attn_output) + return attn_output + + +QWEN2_5_VL_VISION_ATTENTION_CLASSES = { + "eager": Qwen2_5_VLVisionAttention, + "flash_attention_2": Qwen2_5_VLVisionFlashAttention2, + "sdpa": Qwen2_5_VLVisionSdpaAttention, +} + + +class Qwen2_5_VLVisionBlock(nn.Module): + def __init__(self, config, attn_implementation: str = "sdpa") -> None: + super().__init__() + self.norm1 = Qwen2RMSNorm(config.hidden_size, eps=1e-6) + self.norm2 = Qwen2RMSNorm(config.hidden_size, eps=1e-6) + self.attn = QWEN2_5_VL_VISION_ATTENTION_CLASSES[attn_implementation]( + config.hidden_size, num_heads=config.num_heads + ) + self.mlp = Qwen2_5_VLMLP(config, bias=True) + + def forward( + self, + hidden_states: torch.Tensor, + cu_seqlens: torch.Tensor, + rotary_pos_emb: Optional[torch.Tensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + ) -> torch.Tensor: + hidden_states = hidden_states + self.attn( + self.norm1(hidden_states), + cu_seqlens=cu_seqlens, + rotary_pos_emb=rotary_pos_emb, + position_embeddings=position_embeddings, + ) + hidden_states = hidden_states + self.mlp(self.norm2(hidden_states)) + return hidden_states + + +Qwen2_5_VL_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`Qwen2_5_VLConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +@add_start_docstrings( + "The bare Qwen2_5_VL Model outputting raw hidden-states without any specific head on top.", + Qwen2_5_VL_START_DOCSTRING, +) +class Qwen2_5_VLPreTrainedModel(PreTrainedModel): + config_class = Qwen2_5_VLConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["Qwen2_5_VLDecoderLayer", "Qwen2_5_VLVisionBlock"] + _skip_keys_device_placement = "past_key_values" + _supports_flash_attn_2 = True + _supports_sdpa = True + _supports_cache_class = True + _supports_static_cache = False # TODO (joao): fix. 
torch.compile failing probably due to `cache_positions` + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, (nn.Linear, nn.Conv3d)): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +class Qwen2_5_VisionTransformerPretrainedModel(Qwen2_5_VLPreTrainedModel): + config_class = Qwen2_5_VLVisionConfig + _no_split_modules = ["Qwen2_5_VLVisionBlock"] + + def __init__(self, config, *inputs, **kwargs) -> None: + super().__init__(config, *inputs, **kwargs) + self.spatial_merge_size = config.spatial_merge_size + self.patch_size = config.patch_size + self.fullatt_block_indexes = config.fullatt_block_indexes + self.window_size = config.window_size + self.spatial_merge_unit = self.spatial_merge_size * self.spatial_merge_size + + self.patch_embed = Qwen2_5_VisionPatchEmbed( + patch_size=config.patch_size, + temporal_patch_size=config.temporal_patch_size, + in_channels=config.in_channels, + embed_dim=config.hidden_size, + ) + + head_dim = config.hidden_size // config.num_heads + self.rotary_pos_emb = Qwen2_5_VisionRotaryEmbedding(head_dim // 2) + + self.blocks = nn.ModuleList( + [Qwen2_5_VLVisionBlock(config, config._attn_implementation) for _ in range(config.depth)] + ) + self.merger = Qwen2_5_VLPatchMerger( + dim=config.out_hidden_size, + context_dim=config.hidden_size, + spatial_merge_size=config.spatial_merge_size, + ) + self.gradient_checkpointing = False + + def rot_pos_emb(self, grid_thw): + pos_ids = [] + for t, h, w in grid_thw: + hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w) + hpos_ids = hpos_ids.reshape( + h // self.spatial_merge_size, + self.spatial_merge_size, + w // self.spatial_merge_size, + self.spatial_merge_size, + ) + hpos_ids = hpos_ids.permute(0, 2, 1, 3) + hpos_ids = hpos_ids.flatten() + + wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1) + wpos_ids = wpos_ids.reshape( + h // self.spatial_merge_size, + self.spatial_merge_size, + w // self.spatial_merge_size, + self.spatial_merge_size, + ) + wpos_ids = wpos_ids.permute(0, 2, 1, 3) + wpos_ids = wpos_ids.flatten() + pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1)) + pos_ids = torch.cat(pos_ids, dim=0) + max_grid_size = grid_thw[:, 1:].max() + rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size) + rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1) + return rotary_pos_emb + + def get_window_index(self, grid_thw): + window_index: list = [] + cu_window_seqlens: list = [0] + window_index_id = 0 + vit_merger_window_size = self.window_size // self.spatial_merge_size // self.patch_size + + for grid_t, grid_h, grid_w in grid_thw: + llm_grid_h, llm_grid_w = ( + grid_h // self.spatial_merge_size, + grid_w // self.spatial_merge_size, + ) + index = torch.arange(grid_t * llm_grid_h * llm_grid_w).reshape(grid_t, llm_grid_h, llm_grid_w) + pad_h = vit_merger_window_size - llm_grid_h % vit_merger_window_size + pad_w = vit_merger_window_size - llm_grid_w % vit_merger_window_size + num_windows_h = (llm_grid_h + pad_h) // vit_merger_window_size + num_windows_w = (llm_grid_w + pad_w) // vit_merger_window_size + index_padded = F.pad(index, (0, pad_w, 0, pad_h), "constant", -100) + index_padded = index_padded.reshape( + grid_t, + num_windows_h, + vit_merger_window_size, + num_windows_w, + vit_merger_window_size, + ) + index_padded = 
index_padded.permute(0, 1, 3, 2, 4).reshape( + grid_t, + num_windows_h * num_windows_w, + vit_merger_window_size, + vit_merger_window_size, + ) + seqlens = (index_padded != -100).sum([2, 3]).reshape(-1) + index_padded = index_padded.reshape(-1) + index_new = index_padded[index_padded != -100] + window_index.append(index_new + window_index_id) + cu_seqlens_tmp = seqlens.cumsum(0) * self.spatial_merge_unit + cu_window_seqlens[-1] + cu_window_seqlens.extend(cu_seqlens_tmp.tolist()) + window_index_id += (grid_t * llm_grid_h * llm_grid_w).item() + window_index = torch.cat(window_index, dim=0) + + return window_index, cu_window_seqlens + + def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor) -> torch.Tensor: + """ + Args: + hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`): + The final hidden states of the model. + grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`): + The temporal, height and width of feature shape of each image in LLM. + + Returns: + `torch.Tensor`: hidden_states. + """ + hidden_states = self.patch_embed(hidden_states) + rotary_pos_emb = self.rot_pos_emb(grid_thw) + window_index, cu_window_seqlens = self.get_window_index(grid_thw) + cu_window_seqlens = torch.tensor( + cu_window_seqlens, + device=hidden_states.device, + dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32, + ) + cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens) + + seq_len, _ = hidden_states.size() + hidden_states = hidden_states.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1) + hidden_states = hidden_states[window_index, :, :] + hidden_states = hidden_states.reshape(seq_len, -1) + rotary_pos_emb = rotary_pos_emb.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1) + rotary_pos_emb = rotary_pos_emb[window_index, :, :] + rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1) + emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1) + position_embeddings = (emb.cos(), emb.sin()) + + cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum( + dim=0, + # Select dtype based on the following factors: + # - FA2 requires that cu_seqlens_q must have dtype int32 + # - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw + # See https://github.com/huggingface/transformers/pull/34852 for more information + dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32, + ) + cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0) + + for layer_num, blk in enumerate(self.blocks): + if layer_num in self.fullatt_block_indexes: + cu_seqlens_now = cu_seqlens + else: + cu_seqlens_now = cu_window_seqlens + if self.gradient_checkpointing and self.training: + hidden_states = self._gradient_checkpointing_func( + blk.__call__, hidden_states, cu_seqlens_now, None, position_embeddings, use_reentrant=False + ) + else: + hidden_states = blk(hidden_states, cu_seqlens=cu_seqlens_now, position_embeddings=position_embeddings) + + hidden_states = self.merger(hidden_states) + reverse_indices = torch.argsort(window_index) + hidden_states = hidden_states[reverse_indices, :] + + return hidden_states + + +class Qwen2_5_VLRotaryEmbedding(nn.Module): + def __init__(self, config: Qwen2_5_VLConfig, device=None): + super().__init__() + # BC: "rope_type" was originally "type" + if hasattr(config, "rope_scaling") and config.rope_scaling is not None: + self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) + else: + self.rope_type = "default" + 
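For intuition about the `cu_seqlens` consumed by the vision blocks above: they are cumulative per-image patch counts derived from `grid_thw`, reproduced here standalone with assumed grid sizes:

```python
import torch
import torch.nn.functional as F

# Two images with patch grids (t=1, h=28, w=28) and (1, 14, 14); the values are illustrative.
grid_thw = torch.tensor([[1, 28, 28], [1, 14, 14]])
cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(dim=0, dtype=torch.int32)
cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
print(cu_seqlens)  # tensor([  0, 784, 980], dtype=torch.int32): per-image token boundaries for block-diagonal attention
```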
self.max_seq_len_cached = config.max_position_embeddings + self.original_max_seq_len = config.max_position_embeddings + + self.config = config + self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] + + inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) + self.register_buffer("inv_freq", inv_freq, persistent=False) + self.original_inv_freq = self.inv_freq + + def _dynamic_frequency_update(self, position_ids, device): + """ + dynamic RoPE layers should recompute `inv_freq` in the following situations: + 1 - growing beyond the cached sequence length (allow scaling) + 2 - the current sequence length is in the original scale (avoid losing precision with small sequences) + """ + seq_len = torch.max(position_ids) + 1 + if seq_len > self.max_seq_len_cached: # growth + inv_freq, self.attention_scaling = self.rope_init_fn( + self.config, device, seq_len=seq_len, **self.rope_kwargs + ) + self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation + self.max_seq_len_cached = seq_len + + if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset + self.register_buffer("inv_freq", self.original_inv_freq, persistent=False) + self.max_seq_len_cached = self.original_max_seq_len + + # @torch.no_grad() + def forward(self, x, position_ids): + if "dynamic" in self.rope_type: + self._dynamic_frequency_update(position_ids, device=x.device) + + # Core RoPE block. In contrast to other models, Qwen2_5_VL has different position ids for the grids + # So we expand the inv_freq to shape (3, ...) + inv_freq_expanded = self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1) + position_ids_expanded = position_ids[:, :, None, :].float() # shape (3, bs, 1, positions) + # Force float32 (see https://github.com/huggingface/transformers/pull/29285) + device_type = x.device.type + device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu" + with torch.autocast(device_type=device_type, enabled=False): + freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3) + emb = torch.cat((freqs, freqs), dim=-1) + cos = emb.cos() + sin = emb.sin() + + # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention + cos = cos * self.attention_scaling + sin = sin * self.attention_scaling + + return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) + + +class Qwen2MLP(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.intermediate_size = config.intermediate_size + self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) + self.act_fn = ACT2FN[config.hidden_act] + + def forward(self, x): + down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) + return down_proj + + +def apply_multimodal_rotary_pos_emb(q, k, cos, sin, mrope_section, unsqueeze_dim=1): + """Applies Rotary Position Embedding with Multimodal Sections to the query and key tensors (https://qwenlm.github.io/blog/qwen2-vl/). + + Explanation: + Multimodal 3D rotary position embedding is an extension to 1D rotary position embedding. The input embedding + sequence contains vision (images / videos) embedding and text embedding or just contains text embedding. 
For + vision embedding part, we apply rotary position embedding on temporal, height and width dimension separately. + Here we split the channel dimension to 3 chunks for the temporal, height and width rotary position embedding. + For text embedding part, we just apply 1D rotary position embedding. The three rotary position index (temporal, + height and width) of text embedding is always the same, so the text embedding rotary position embedding has no + difference with modern LLMs. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`): + The position indices of the tokens corresponding to the query and key tensors. For example, this can be + used to pass offsetted position ids when working with a KV-cache. + mrope_section(`List(int)`): + Multimodal rope section is for channel dimension of temporal, height and width in rope calculation. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. + """ + mrope_section = mrope_section * 2 + cos = torch.cat([m[i % 3] for i, m in enumerate(cos.split(mrope_section, dim=-1))], dim=-1).unsqueeze( + unsqueeze_dim + ) + sin = torch.cat([m[i % 3] for i, m in enumerate(sin.split(mrope_section, dim=-1))], dim=-1).unsqueeze( + unsqueeze_dim + ) + + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + """ + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, + num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) + """ + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep == 1: + return hidden_states + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + +class Qwen2_5_VLAttention(nn.Module): + """ + Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer + and "Generating Long Sequences with Sparse Transformers". + """ + + def __init__(self, config: Qwen2_5_VLConfig, layer_idx: Optional[int] = None): + super().__init__() + self.config = config + self.layer_idx = layer_idx + if layer_idx is None: + logger.warning_once( + f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will " + "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` " + "when creating this class." 
+ ) + + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.num_key_value_heads = config.num_key_value_heads + self.num_key_value_groups = self.num_heads // self.num_key_value_heads + self.is_causal = True + self.attention_dropout = config.attention_dropout + self.rope_scaling = config.rope_scaling + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." + ) + self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True) + self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True) + self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True) + self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) + + self.rotary_emb = Qwen2_5_VLRotaryEmbedding(config=config) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) + + cos, sin = position_embeddings + query_states, key_states = apply_multimodal_rotary_pos_emb( + query_states, key_states, cos, sin, self.rope_scaling["mrope_section"] + ) + + if past_key_value is not None: + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + # repeat k/v heads if n_kv_heads < n_heads + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attention_mask is not None: # no matter the length, we just slice it + causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] + attn_weights = attn_weights + causal_mask + + # Fix precision issues in Qwen2-VL float16 inference + # Replace inf values with zeros in attention weights to prevent NaN propagation + if query_states.dtype == torch.float16: + attn_weights = torch.where(torch.isinf(attn_weights), torch.zeros_like(attn_weights), attn_weights) + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size 
{(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.reshape(bsz, q_len, -1) + + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class Qwen2_5_VLFlashAttention2(Qwen2_5_VLAttention): + """ + Qwen2_5_VL flash attention module, following Qwen2_5_VL attention module. This module inherits from `Qwen2_5_VLAttention` + as the weights of the module stays untouched. The only required change would be on the forward pass + where it needs to correctly call the public API of flash attention and deal with padding tokens + in case the input contains any of them. Additionally, for sliding window attention, we apply SWA only to the bottom + config.max_window_layers layers. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. + # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. + # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). + self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC + ): + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) + + # Because the input can be padded, the absolute sequence length depends on the max position id. + cos, sin = position_embeddings + query_states, key_states = apply_multimodal_rotary_pos_emb( + query_states, key_states, cos, sin, self.rope_scaling["mrope_section"] + ) + + if past_key_value is not None: + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + # repeat k/v heads if n_kv_heads < n_heads + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + dropout_rate = 0.0 if not self.training else self.attention_dropout + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in float16 just to be sure everything works as expected. 
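+        # Note (added): the flash-attention kernels only accept fp16/bf16 inputs, so the
+        # cast-back below targets, in order of preference, the active autocast dtype, the
+        # pre-quantization dtype recorded on the config, or the dtype of the attention
+        # projection weights.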
+ input_dtype = query_states.dtype + if input_dtype == torch.float32: + if torch.is_autocast_enabled(): + target_dtype = torch.get_autocast_gpu_dtype() + # Handle the case where the model is quantized + elif hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.q_proj.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." + ) + + query_states = query_states.to(target_dtype) + key_states = key_states.to(target_dtype) + value_states = value_states.to(target_dtype) + + # Reashape to the expected shape for Flash Attention + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.transpose(1, 2) + + if ( + self.config.use_sliding_window + and getattr(self.config, "sliding_window", None) is not None + and self.layer_idx >= self.config.max_window_layers + ): + sliding_window = self.config.sliding_window + else: + sliding_window = None + + attn_output = _flash_attention_forward( + query_states, + key_states, + value_states, + attention_mask, + q_len, + dropout=dropout_rate, + sliding_window=sliding_window, + is_causal=self.is_causal, + use_top_left_mask=self._flash_attn_uses_top_left_mask, + ) + + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous() + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class Qwen2_5_VLSdpaAttention(Qwen2_5_VLAttention): + """ + Qwen2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from + `Qwen2Attention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to + SDPA API. + """ + + # Adapted from Qwen2Attention.forward + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if output_attentions: + # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. + logger.warning_once( + "Qwen2_5_VLModel is using Qwen2_5_VLSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " + 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
+ ) + return super().forward( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + position_embeddings=position_embeddings, + ) + + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) + + cos, sin = position_embeddings + query_states, key_states = apply_multimodal_rotary_pos_emb( + query_states, key_states, cos, sin, self.rope_scaling["mrope_section"] + ) + + if past_key_value is not None: + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + causal_mask = attention_mask + if attention_mask is not None: # no matter the length, we just slice it + causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] + + # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, + # Reference: https://github.com/pytorch/pytorch/issues/112577. + if query_states.device.type == "cuda" and attention_mask is not None: + query_states = query_states.contiguous() + key_states = key_states.contiguous() + value_states = value_states.contiguous() + + # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment + # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling. + # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1. + is_causal = True if causal_mask is None and q_len > 1 else False + + attn_output = torch.nn.functional.scaled_dot_product_attention( + query_states, + key_states, + value_states, + attn_mask=causal_mask, + dropout_p=self.attention_dropout if self.training else 0.0, + is_causal=is_causal, + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.view(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + return attn_output, None, past_key_value + + +QWEN2_5_VL_ATTENTION_CLASSES = { + "eager": Qwen2_5_VLAttention, + "flash_attention_2": Qwen2_5_VLFlashAttention2, + "sdpa": Qwen2_5_VLSdpaAttention, +} + + +class Qwen2_5_VLDecoderLayer(nn.Module): + def __init__(self, config: Qwen2_5_VLConfig, layer_idx: int): + super().__init__() + self.hidden_size = config.hidden_size + + if config.use_sliding_window and config._attn_implementation != "flash_attention_2": + logger.warning_once( + f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; " + "unexpected results may be encountered." 
+ ) + self.self_attn = QWEN2_5_VL_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx) + + self.mlp = Qwen2MLP(config) + self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.post_attention_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + cache_position: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC + **kwargs, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, sequence_length)` where padding elements are indicated by 0. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): + Indices depicting the position of the input sequence tokens in the sequence. + position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): + Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, + with `head_dim` being the embedding dimension of each attention head. 
+ kwargs (`dict`, *optional*): + Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code + into the model + """ + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + position_embeddings=position_embeddings, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +@add_start_docstrings( + "The bare Qwen2_5_VL Model outputting raw hidden-states without any specific head on top.", + Qwen2_5_VL_START_DOCSTRING, +) +class Qwen2_5_VLModel(Qwen2_5_VLPreTrainedModel): + def __init__(self, config: Qwen2_5_VLConfig): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + self.layers = nn.ModuleList( + [Qwen2_5_VLDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] + ) + self._attn_implementation = config._attn_implementation + self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.rotary_emb = Qwen2_5_VLRotaryEmbedding(config=config) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if (input_ids is None) ^ (inputs_embeds is not None): + raise ValueError("You must specify exactly one of input_ids or inputs_embeds") + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + # torch.jit.trace() doesn't support cache objects in the output + if use_cache and past_key_values is None and not torch.jit.is_tracing(): + past_key_values = DynamicCache() + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + if cache_position is None: + past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 + cache_position = torch.arange( + past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device + ) + + # the hard coded `3` is for temporal, height and width. + if position_ids is None: + position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1) + elif position_ids.dim() == 2: + position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1) + + causal_mask = self._update_causal_mask( + attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions + ) + + hidden_states = inputs_embeds + + # create position embeddings to be shared across the decoder layers + position_embeddings = self.rotary_emb(hidden_states, position_ids) + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = None + + for decoder_layer in self.layers: + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + causal_mask, + position_ids, + past_key_values, + output_attentions, + use_cache, + cache_position, + position_embeddings, + use_reentrant=False + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=causal_mask, + position_ids=position_ids, + past_key_value=past_key_values, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + position_embeddings=position_embeddings, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache = layer_outputs[2 if output_attentions else 1] + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + def _update_causal_mask( + self, + attention_mask: torch.Tensor, + input_tensor: torch.Tensor, + cache_position: torch.Tensor, + past_key_values: Cache, + output_attentions: bool = False, + ): + if self.config._attn_implementation == "flash_attention_2": + if attention_mask is not None and past_key_values is not None: + is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0] + if is_padding_right: + raise ValueError( + "You are attempting to perform batched generation with padding_side='right'" + " this may lead to unexpected behaviour for Flash Attention version of Qwen2_5_VL. Make sure to " + " call `tokenizer.padding_side = 'left'` before tokenizing the input. 
" + ) + if attention_mask is not None and 0.0 in attention_mask: + return attention_mask + return None + + # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in + # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail + # to infer the attention mask. + past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 + using_static_cache = isinstance(past_key_values, StaticCache) + using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache) + + # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward + if ( + self.config._attn_implementation == "sdpa" + and not (using_static_cache or using_sliding_window_cache) + and not output_attentions + ): + if AttentionMaskConverter._ignore_causal_mask_sdpa( + attention_mask, + inputs_embeds=input_tensor, + past_key_values_length=past_seen_tokens, + sliding_window=self.config.sliding_window, + is_training=self.training, + ): + return None + + dtype, device = input_tensor.dtype, input_tensor.device + min_dtype = torch.finfo(dtype).min + sequence_length = input_tensor.shape[1] + # SlidingWindowCache or StaticCache + if using_sliding_window_cache or using_static_cache: + target_length = past_key_values.get_max_cache_shape() + # DynamicCache or no cache + else: + target_length = ( + attention_mask.shape[-1] + if isinstance(attention_mask, torch.Tensor) + else past_seen_tokens + sequence_length + 1 + ) + + # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). + causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( + attention_mask, + sequence_length=sequence_length, + target_length=target_length, + dtype=dtype, + device=device, + cache_position=cache_position, + batch_size=input_tensor.shape[0], + config=self.config, + past_key_values=past_key_values, + ) + + if ( + self.config._attn_implementation == "sdpa" + and attention_mask is not None + and attention_mask.device.type in ["cuda", "xpu"] + and not output_attentions + ): + # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when + # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. + # Details: https://github.com/pytorch/pytorch/issues/110213 + causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) + + return causal_mask + + @staticmethod + def _prepare_4d_causal_attention_mask_with_cache_position( + attention_mask: torch.Tensor, + sequence_length: int, + target_length: int, + dtype: torch.dtype, + device: torch.device, + cache_position: torch.Tensor, + batch_size: int, + config: Qwen2_5_VLConfig, + past_key_values: Cache, + ): + """ + Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape + `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. + + Args: + attention_mask (`torch.Tensor`): + A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. + sequence_length (`int`): + The sequence length being processed. + target_length (`int`): + The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. 
+ dtype (`torch.dtype`): + The dtype to use for the 4D attention mask. + device (`torch.device`): + The device to place the 4D attention mask on. + cache_position (`torch.Tensor`): + Indices depicting the position of the input sequence tokens in the sequence. + batch_size (`torch.Tensor`): + Batch size. + config (`Qwen2_5_VLConfig`): + The model's configuration class + past_key_values (`Cache`): + The cache class that is being used currently to generate + """ + if attention_mask is not None and attention_mask.dim() == 4: + # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. + causal_mask = attention_mask + else: + min_dtype = torch.finfo(dtype).min + causal_mask = torch.full( + (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device + ) + diagonal_attend_mask = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) + if config.sliding_window is not None: + # if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also + # the check is needed to verify is current checkpoint was trained with sliding window or not + if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length: + sliding_attend_mask = torch.arange(target_length, device=device) <= ( + cache_position.reshape(-1, 1) - config.sliding_window + ) + diagonal_attend_mask.bitwise_or_(sliding_attend_mask) + causal_mask *= diagonal_attend_mask + causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) + if attention_mask is not None: + causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit + if attention_mask.shape[-1] > target_length: + attention_mask = attention_mask[:, :target_length] + mask_length = attention_mask.shape[-1] + padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to( + causal_mask.device + ) + padding_mask = padding_mask == 0 + causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( + padding_mask, min_dtype + ) + return causal_mask + + +@dataclass +class Qwen2_5_VLCausalLMOutputWithPast(ModelOutput): + """ + Base class for Qwen2_5_VL causal language model (or autoregressive) outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss (for next-token prediction). + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) + + Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see + `past_key_values` input) to speed up sequential decoding. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. 
+ + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): + The rope index difference between sequence length and multimodal rope. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + past_key_values: Optional[List[torch.FloatTensor]] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + rope_deltas: Optional[torch.LongTensor] = None + + +QWEN2_5_VL_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + pixel_values (`torch.FloatTensor` of shape `(seq_length, num_channels * image_size * image_size)): + The tensors corresponding to the input images. Pixel values can be obtained using + [`AutoImageProcessor`]. See [`Qwen2_5_VLImageProcessor.__call__`] for details. [`Qwen2_5_VLProcessor`] uses + [`Qwen2_5_VLImageProcessor`] for processing images. + pixel_values_videos (`torch.FloatTensor` of shape `(seq_length, num_channels * temporal_size * image_size * image_size)): + The tensors corresponding to the input videos. Pixel values can be obtained using + [`AutoImageProcessor`]. See [`Qwen2_5_VLImageProcessor.__call__`] for details. [`Qwen2_5_VLProcessor`] uses + [`Qwen2_5_VLImageProcessor`] for processing videos. + image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): + The temporal, height and width of feature shape of each image in LLM. + video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): + The temporal, height and width of feature shape of each video in LLM. + rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): + The rope index difference between sequence length and multimodal rope. 
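+            Concretely, `get_rope_index` returns it as `max_position_id + 1 - sequence_length`,
+            and during decoding the 1D `cache_position` is shifted by this delta to recover the
+            multimodal position ids for newly generated tokens.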
+""" + + +class Qwen2_5_VLForConditionalGeneration(Qwen2_5_VLPreTrainedModel, GenerationMixin): + _tied_weights_keys = ["lm_head.weight"] + config_class = Qwen2_5_VLConfig + _no_split_modules = ["Qwen2_5_VLDecoderLayer", "Qwen2_5_VLVisionBlock"] + + def __init__(self, config): + super().__init__(config) + self.visual = Qwen2_5_VisionTransformerPretrainedModel._from_config(config.vision_config) + self.model = Qwen2_5_VLModel(config) + self.vocab_size = config.vocab_size + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + self.rope_deltas = None # cache rope_deltas here + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model = decoder + + def get_decoder(self): + return self.model + + def get_rope_index( + self, + input_ids: Optional[torch.LongTensor] = None, + image_grid_thw: Optional[torch.LongTensor] = None, + video_grid_thw: Optional[torch.LongTensor] = None, + second_per_grid_ts: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Calculate the 3D rope index based on image and video's temporal, height and width in LLM. + + Explanation: + Each embedding sequence contains vision embedding and text embedding or just contains text embedding. + + For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs. + Examples: + input_ids: [T T T T T], here T is for text. + temporal position_ids: [0, 1, 2, 3, 4] + height position_ids: [0, 1, 2, 3, 4] + width position_ids: [0, 1, 2, 3, 4] + + For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part + and 1D rotary position embedding for text part. + Examples: + Temporal (Time): 3 patches, representing different segments of the video in time. + Height: 2 patches, dividing each frame vertically. + Width: 2 patches, dividing each frame horizontally. + We also have some important parameters: + fps (Frames Per Second): The video's frame rate, set to 1. This means one frame is processed each second. + tokens_per_second: This is a crucial parameter. It dictates how many "time-steps" or "temporal tokens" are conceptually packed into a one-second interval of the video. In this case, we have 25 tokens per second. So each second of the video will be represented with 25 separate time points. It essentially defines the temporal granularity. + temporal_patch_size: The number of frames that compose one temporal patch. Here, it's 2 frames. + interval: The step size for the temporal position IDs, calculated as tokens_per_second * temporal_patch_size / fps. In this case, 25 * 2 / 1 = 50. This means that each temporal patch will be have a difference of 50 in the temporal position IDs. + input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision. 
+ vision temporal position_ids: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100] + vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1] + vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1] + text temporal position_ids: [101, 102, 103, 104, 105] + text height position_ids: [101, 102, 103, 104, 105] + text width position_ids: [101, 102, 103, 104, 105] + Here we calculate the text start position_ids as the max vision position_ids plus 1. + + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): + The temporal, height and width of feature shape of each image in LLM. + video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): + The temporal, height and width of feature shape of each video in LLM. + second_per_grid_ts (`torch.Tensor` of shape `(num_videos)`, *optional*): + The time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs. + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + Returns: + position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`) + mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`) + """ + spatial_merge_size = self.config.vision_config.spatial_merge_size + image_token_id = self.config.image_token_id + video_token_id = self.config.video_token_id + vision_start_token_id = self.config.vision_start_token_id + mrope_position_deltas = [] + if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None): + total_input_ids = input_ids + if attention_mask is None: + attention_mask = torch.ones_like(total_input_ids) + position_ids = torch.ones( + 3, + input_ids.shape[0], + input_ids.shape[1], + dtype=input_ids.dtype, + device=input_ids.device, + ) + image_index, video_index = 0, 0 + attention_mask = attention_mask.to(total_input_ids.device) + for i, input_ids in enumerate(total_input_ids): + input_ids = input_ids[attention_mask[i] == 1] + image_nums, video_nums = 0, 0 + vision_start_indices = torch.argwhere(input_ids == vision_start_token_id).squeeze(1) + vision_tokens = input_ids[vision_start_indices + 1] + image_nums = (vision_tokens == image_token_id).sum() + video_nums = (vision_tokens == video_token_id).sum() + input_tokens = input_ids.tolist() + llm_pos_ids_list: list = [] + st = 0 + remain_images, remain_videos = image_nums, video_nums + for _ in range(image_nums + video_nums): + if image_token_id in input_tokens and remain_images > 0: + ed_image = input_tokens.index(image_token_id, st) + else: + ed_image = len(input_tokens) + 1 + if video_token_id in input_tokens and remain_videos > 0: + ed_video = input_tokens.index(video_token_id, st) + else: + ed_video = len(input_tokens) + 1 + if ed_image < ed_video: + t, h, w = ( + image_grid_thw[image_index][0], + image_grid_thw[image_index][1], + image_grid_thw[image_index][2], + ) + second_per_grid_t = 0 + image_index += 1 + remain_images -= 1 + ed = ed_image + + else: + t, h, w = ( + video_grid_thw[video_index][0], + video_grid_thw[video_index][1], + video_grid_thw[video_index][2], + ) + if second_per_grid_ts is not None: + second_per_grid_t = 
second_per_grid_ts[video_index] + else: + second_per_grid_t = 1.0 + video_index += 1 + remain_videos -= 1 + ed = ed_video + llm_grid_t, llm_grid_h, llm_grid_w = ( + t.item(), + h.item() // spatial_merge_size, + w.item() // spatial_merge_size, + ) + text_len = ed - st + + st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 + llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) + + range_tensor = torch.arange(llm_grid_t).view(-1, 1) + expanded_range = range_tensor.expand(-1, llm_grid_h * llm_grid_w) + + time_tensor = expanded_range * second_per_grid_t * self.config.vision_config.tokens_per_second + + time_tensor_long = time_tensor.long() + t_index = time_tensor_long.flatten() + + h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten() + w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten() + llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + text_len + st_idx) + st = ed + llm_grid_t * llm_grid_h * llm_grid_w + + if st < len(input_tokens): + st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 + text_len = len(input_tokens) - st + llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) + + llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1) + position_ids[..., i, attention_mask[i] == 1] = llm_positions.to(position_ids.device) + mrope_position_deltas.append(llm_positions.max() + 1 - len(total_input_ids[i])) + mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1) + return position_ids, mrope_position_deltas + else: + if attention_mask is not None: + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device) + max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0] + mrope_position_deltas = max_position_ids + 1 - attention_mask.shape[-1] + else: + position_ids = ( + torch.arange(input_ids.shape[1], device=input_ids.device) + .view(1, 1, -1) + .expand(3, input_ids.shape[0], -1) + ) + mrope_position_deltas = torch.zeros( + [input_ids.shape[0], 1], + device=input_ids.device, + dtype=input_ids.dtype, + ) + + return position_ids, mrope_position_deltas + + @add_start_docstrings_to_model_forward(QWEN2_5_VL_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=Qwen2_5_VLCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + pixel_values: Optional[torch.Tensor] = None, + pixel_values_videos: Optional[torch.FloatTensor] = None, + image_grid_thw: Optional[torch.LongTensor] = None, + video_grid_thw: Optional[torch.LongTensor] = None, + rope_deltas: Optional[torch.LongTensor] = None, + cache_position: Optional[torch.LongTensor] = None, + second_per_grid_ts: Optional[torch.Tensor] = None, + ) -> Union[Tuple, Qwen2_5_VLCausalLMOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, 
*optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration + + >>> model = Qwen2_5_VLForConditionalGeneration.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct") + >>> processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct") + + >>> messages = [ + { + "role": "user", + "content": [ + {"type": "image"}, + {"type": "text", "text": "What is shown in this image?"}, + ], + }, + ] + >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) + >>> inputs = processor(text=[text], images=[image], vision_infos=[vision_infos]) + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "The image shows a street scene with a red stop sign in the foreground. In the background, there is a large red gate with Chinese characters ..." + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if inputs_embeds is None: + inputs_embeds = self.model.embed_tokens(input_ids) + if pixel_values is not None: + pixel_values = pixel_values.type(self.visual.dtype) + image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw) + n_image_tokens = (input_ids == self.config.image_token_id).sum().item() + n_image_features = image_embeds.shape[0] + if n_image_tokens != n_image_features: + raise ValueError( + f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}" + ) + + mask = input_ids == self.config.image_token_id + mask_unsqueezed = mask.unsqueeze(-1) + mask_expanded = mask_unsqueezed.expand_as(inputs_embeds) + image_mask = mask_expanded.to(inputs_embeds.device) + + image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype) + inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds) + + if pixel_values_videos is not None: + pixel_values_videos = pixel_values_videos.type(self.visual.dtype) + video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw) + n_video_tokens = (input_ids == self.config.video_token_id).sum().item() + n_video_features = video_embeds.shape[0] + if n_video_tokens != n_video_features: + raise ValueError( + f"Video features and video tokens do not match: tokens: {n_video_tokens}, features {n_video_features}" + ) + + mask = input_ids == self.config.video_token_id + mask_unsqueezed = mask.unsqueeze(-1) + mask_expanded = mask_unsqueezed.expand_as(inputs_embeds) + video_mask = mask_expanded.to(inputs_embeds.device) + + video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype) + inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds) + + if attention_mask is not None: + attention_mask 
= attention_mask.to(inputs_embeds.device) + + # if we get 4D attention mask we cannot calculate rope deltas anymore. TODO @raushan fixme + if position_ids is None and (attention_mask is None or attention_mask.ndim == 2): + # calculate RoPE index once per generation in the pre-fill stage only + if ( + (cache_position is not None and cache_position[0] == 0) + or self.rope_deltas is None + or (past_key_values is None or past_key_values.get_seq_length() == 0) + ): + position_ids, rope_deltas = self.get_rope_index( + input_ids, + image_grid_thw, + video_grid_thw, + second_per_grid_ts, + attention_mask, + ) + self.rope_deltas = rope_deltas + # then use the prev pre-calculated rope-deltas to get the correct position ids + else: + batch_size, seq_length, _ = inputs_embeds.shape + delta = ( + (cache_position[0] + self.rope_deltas).to(inputs_embeds.device) + if cache_position is not None + else 0 + ) + position_ids = torch.arange(seq_length, device=inputs_embeds.device) + position_ids = position_ids.view(1, -1).expand(batch_size, -1) + if cache_position is not None: # otherwise `deltas` is an int `0` + delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0) + position_ids = position_ids.add(delta) + position_ids = position_ids.unsqueeze(0).expand(3, -1, -1) + + outputs = self.model( + input_ids=None, + position_ids=position_ids, + attention_mask=attention_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + cache_position=cache_position, + ) + + hidden_states = outputs[0] + logits = self.lm_head(hidden_states) + + loss = None + if labels is not None: + # Upcast to float if we need to compute the loss to avoid potential precision issues + logits = logits.float() + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return Qwen2_5_VLCausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + rope_deltas=self.rope_deltas, + ) + + def prepare_inputs_for_generation( + self, + input_ids, + past_key_values=None, + attention_mask=None, + inputs_embeds=None, + cache_position=None, + position_ids=None, + use_cache=True, + pixel_values=None, + pixel_values_videos=None, + image_grid_thw=None, + video_grid_thw=None, + second_per_grid_ts=None, + **kwargs, + ): + # Overwritten -- in specific circumstances we don't want to forward image inputs to the model + + model_inputs = super().prepare_inputs_for_generation( + input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + cache_position=cache_position, + position_ids=position_ids, + pixel_values=pixel_values, + pixel_values_videos=pixel_values_videos, + image_grid_thw=image_grid_thw, + video_grid_thw=video_grid_thw, + second_per_grid_ts=second_per_grid_ts, + use_cache=use_cache, + **kwargs, + ) + + # Qwen2-5-VL position_ids are prepareed with rope_deltas in 
forward + model_inputs["position_ids"] = None + + if cache_position[0] != 0: + model_inputs["pixel_values"] = None + model_inputs["pixel_values_videos"] = None + + return model_inputs + + def _get_image_nums_and_video_nums( + self, + input_ids: Optional[torch.LongTensor], + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Get the number of images and videos for each sample to calculate the separation length of the sample tensor. + These parameters are not passed through the processor to avoid unpredictable impacts from interface modifications. + + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. + + Returns: + image_nums (`torch.LongTensor` of shape `(batch_size, num_images_sample)`) + video_nums (`torch.LongTensor` of shape `(batch_size, num_videos_sample)`) + """ + image_token_id = self.config.image_token_id + video_token_id = self.config.video_token_id + vision_start_token_id = self.config.vision_start_token_id + + vision_start_mask = input_ids == vision_start_token_id + vision_first_mask = torch.roll(vision_start_mask, shifts=1, dims=1) + image_mask = input_ids == image_token_id + video_mask = input_ids == video_token_id + image_nums = torch.sum(vision_first_mask & image_mask, dim=1) + video_nums = torch.sum(vision_first_mask & video_mask, dim=1) + + return image_nums, video_nums + + def _expand_inputs_for_generation( + self, + expand_size: int = 1, + is_encoder_decoder: bool = False, + input_ids: Optional[torch.LongTensor] = None, + **model_kwargs, + ) -> Tuple[torch.LongTensor, Dict[str, Any]]: + # Overwritten -- Support for expanding tensors without a batch size dimension + # e.g., pixel_values, image_grid_thw, pixel_values_videos, video_grid_thw, second_per_grid_t + # pixel_values.shape[0] is sum(seqlen_images for samples) + # image_grid_thw.shape[0] is sum(num_images for samples) + + if expand_size == 1: + return input_ids, model_kwargs + + visual_keys = ["pixel_values", "image_grid_thw", "pixel_values_videos", "video_grid_thw", "second_per_grid_ts"] + + def _expand_dict_for_generation_visual(dict_to_expand): + image_grid_thw = model_kwargs.get("image_grid_thw", None) + video_grid_thw = model_kwargs.get("video_grid_thw", None) + image_nums, video_nums = self._get_image_nums_and_video_nums(input_ids) + + def _repeat_interleave_samples(x, lengths, repeat_times): + samples = torch.split(x, lengths) + repeat_args = [repeat_times] + [1] * (x.dim() - 1) + result = torch.cat([sample.repeat(*repeat_args) for sample in samples], dim=0) + return result + + for key in dict_to_expand: + if key == "pixel_values": + # split images into samples + samples = torch.split(image_grid_thw, list(image_nums)) + # compute the sequence length of images for each sample + lengths = [torch.prod(sample, dim=1).sum() for sample in samples] + dict_to_expand[key] = _repeat_interleave_samples( + dict_to_expand[key], lengths=lengths, repeat_times=expand_size + ) + elif key == "image_grid_thw": + # get the num of images for each sample + lengths = list(image_nums) + dict_to_expand[key] = _repeat_interleave_samples( + dict_to_expand[key], lengths=lengths, repeat_times=expand_size + ) + elif key == "pixel_values_videos": + samples = torch.split(video_grid_thw, list(video_nums)) + lengths = [torch.prod(sample, dim=1).sum() for sample in samples] + dict_to_expand[key] = _repeat_interleave_samples( + dict_to_expand[key], lengths=lengths, repeat_times=expand_size + ) + elif key == "video_grid_thw": + lengths = list(video_nums) + 
dict_to_expand[key] = _repeat_interleave_samples( + dict_to_expand[key], lengths=lengths, repeat_times=expand_size + ) + elif key == "second_per_grid_ts": + if not isinstance(dict_to_expand[key], list): + raise TypeError( + f"Expected value for key '{key}' to be a list, but got {type(dict_to_expand[key])} instead." + ) + tensor = torch.tensor(dict_to_expand[key]) + lengths = list(video_nums) + tensor = _repeat_interleave_samples(tensor, lengths=lengths, repeat_times=expand_size) + dict_to_expand[key] = tensor.tolist() + return dict_to_expand + + def _expand_dict_for_generation(dict_to_expand): + for key in dict_to_expand: + if ( + key != "cache_position" + and dict_to_expand[key] is not None + and isinstance(dict_to_expand[key], torch.Tensor) + and key not in visual_keys + ): + dict_to_expand[key] = dict_to_expand[key].repeat_interleave(expand_size, dim=0) + return dict_to_expand + + # input_ids is required for expanding visual inputs + # If input_ids is unavailable, visual inputs will not be used; therefore, there is no need to expand visual inputs. + if input_ids is not None and input_ids.numel() != 0: + model_kwargs = _expand_dict_for_generation_visual(model_kwargs) + + if input_ids is not None: + input_ids = input_ids.repeat_interleave(expand_size, dim=0) + + model_kwargs = _expand_dict_for_generation(model_kwargs) + + if is_encoder_decoder: + if model_kwargs.get("encoder_outputs") is None: + raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.") + model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"]) + + return input_ids, model_kwargs + + +__all__ = ["Qwen2_5_VLForConditionalGeneration", "Qwen2_5_VLModel", "Qwen2_5_VLPreTrainedModel"] diff --git a/vlm_fo1/model/multimodal_encoder/qwen2_5_vl/processing_qwen2_5_vl.py b/vlm_fo1/model/multimodal_encoder/qwen2_5_vl/processing_qwen2_5_vl.py new file mode 100644 index 0000000000000000000000000000000000000000..0f72e94622542f7934edd9a1f021f113db7a49ae --- /dev/null +++ b/vlm_fo1/model/multimodal_encoder/qwen2_5_vl/processing_qwen2_5_vl.py @@ -0,0 +1,239 @@ +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +# This file was automatically generated from src/transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py. +# Do NOT edit this file manually as any edits will be overwritten by the generation of +# the file from the modular. If any change should be done, please apply the change to the +# modular_qwen2_5_vl.py file directly. One of our CI enforces this. +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +# coding=utf-8 +# Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
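+
+# Illustrative sketch (added) of the two conversions this processor performs. The values
+# below are example settings for illustration only; the real ones come from the image
+# processor configuration and the `videos_kwargs` defaults:
+#
+#   merge_size = 2                       # spatial merge factor of the vision encoder
+#   image_grid_thw = (1, 36, 54)         # temporal, height, width patches of one image
+#   tokens_per_image = (1 * 36 * 54) // merge_size**2   # -> 486 <|image_pad|> tokens
+#
+#   temporal_patch_size = 2
+#   fps = 2.0
+#   second_per_grid_t = temporal_patch_size / fps        # -> 1.0 second per temporal grid
+#
+# `__call__` below expands each `<|image_pad|>` / `<|video_pad|>` marker in the prompt into
+# that many placeholder tokens and attaches `second_per_grid_ts` for video inputs.
+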
+from typing import List, Optional, Union + +from transformers.feature_extraction_utils import BatchFeature +from transformers.image_utils import ImageInput, VideoInput +from transformers.processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack, VideosKwargs +from transformers.tokenization_utils_base import PreTokenizedInput, TextInput + + +class Qwen2_5_VLVideosProcessorKwargs(VideosKwargs, total=False): + fps: Union[List[float], float] + + +class Qwen2_5_VLImagesKwargs(ImagesKwargs): + min_pixels: Optional[int] + max_pixels: Optional[int] + patch_size: Optional[int] + temporal_patch_size: Optional[int] + merge_size: Optional[int] + + +class Qwen2_5_VLProcessorKwargs(ProcessingKwargs, total=False): + images_kwargs: Qwen2_5_VLImagesKwargs + videos_kwargs: Qwen2_5_VLVideosProcessorKwargs + _defaults = { + "text_kwargs": { + "padding": False, + }, + "videos_kwargs": {"fps": 2.0}, + } + + +class Qwen2_5_VLProcessor(ProcessorMixin): + r""" + Constructs a Qwen2.5-VL processor which wraps a Qwen2.5-VL image processor and a Qwen2 tokenizer into a single processor. + [`Qwen2_5_VLProcessor`] offers all the functionalities of [`Qwen2VLImageProcessor`] and [`Qwen2TokenizerFast`]. See the + [`~Qwen2_5_VLProcessor.__call__`] and [`~Qwen2_5_VLProcessor.decode`] for more information. + Args: + image_processor ([`Qwen2VLImageProcessor`], *optional*): + The image processor is a required input. + tokenizer ([`Qwen2TokenizerFast`], *optional*): + The tokenizer is a required input. + chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages + in a chat into a tokenizable string. + """ + + attributes = ["image_processor", "tokenizer"] + valid_kwargs = ["chat_template"] + + image_processor_class = "AutoImageProcessor" + tokenizer_class = ("Qwen2Tokenizer", "Qwen2TokenizerFast") + + def __init__(self, image_processor=None, tokenizer=None, chat_template=None, **kwargs): + self.image_token = "<|image_pad|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token + self.video_token = "<|video_pad|>" if not hasattr(tokenizer, "video_token") else tokenizer.video_token + super().__init__(image_processor, tokenizer, chat_template=chat_template) + + def __call__( + self, + images: ImageInput = None, + text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, + videos: VideoInput = None, + **kwargs: Unpack[Qwen2_5_VLProcessorKwargs], + ) -> BatchFeature: + """ + Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` + and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode + the text. To prepare the vision inputs, this method forwards the `vision_infos` and `kwrags` arguments to + Qwen2VLImageProcessor's [`~Qwen2VLImageProcessor.__call__`] if `vision_infos` is not `None`. + + Args: + images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): + The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch + tensor. Both channels-first and channels-last formats are supported. + text (`str`, `List[str]`, `List[List[str]]`): + The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings + (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set + `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). + videos (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`): + The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch + tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported. + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors of a particular framework. Acceptable values are: + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return NumPy `np.ndarray` objects. + - `'jax'`: Return JAX `jnp.ndarray` objects. + + Returns: + [`BatchFeature`]: A [`BatchFeature`] with the following fields: + + - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. + - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when + `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not + `None`). + - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. + - **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`. + - **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`. + - **video_grid_thw** -- List of video 3D grid in LLM. Returned when `videos` is not `None`. + - **second_per_grid_ts** -- List of video seconds per time grid. Returned when `videos` is not `None`. + """ + output_kwargs = self._merge_kwargs( + Qwen2_5_VLProcessorKwargs, + tokenizer_init_kwargs=self.tokenizer.init_kwargs, + **kwargs, + ) + if images is not None: + image_inputs = self.image_processor(images=images, videos=None, **output_kwargs["images_kwargs"]) + image_grid_thw = image_inputs["image_grid_thw"] + else: + image_inputs = {} + image_grid_thw = None + + if videos is not None: + videos_inputs = self.image_processor(images=None, videos=videos, **output_kwargs["images_kwargs"]) + video_grid_thw = videos_inputs["video_grid_thw"] + + fps = output_kwargs["videos_kwargs"].pop("fps", 2.0) + if isinstance(fps, (int, float)): + second_per_grid_ts = [self.image_processor.temporal_patch_size / fps] * len(video_grid_thw) + elif hasattr(fps, "__len__") and len(fps) == len(video_grid_thw): + second_per_grid_ts = [self.image_processor.temporal_patch_size / tmp for tmp in fps] + else: + raise ValueError( + f"The length of fps ({len(fps) if hasattr(fps, '__len__') else fps}) must be equal to the length of video_grid_thw ({len(video_grid_thw)}) or fps should be a single number." 
+ ) + videos_inputs.update({"second_per_grid_ts": second_per_grid_ts}) + + else: + videos_inputs = {} + video_grid_thw = None + + if not isinstance(text, list): + text = [text] + + if image_grid_thw is not None: + merge_length = self.image_processor.merge_size**2 + index = 0 + for i in range(len(text)): + while self.image_token in text[i]: + text[i] = text[i].replace( + self.image_token, + "<|placeholder|>" * (image_grid_thw[index].prod() // merge_length), + 1, + ) + index += 1 + text[i] = text[i].replace("<|placeholder|>", self.image_token) + + if video_grid_thw is not None: + merge_length = self.image_processor.merge_size**2 + index = 0 + for i in range(len(text)): + while self.video_token in text[i]: + text[i] = text[i].replace( + self.video_token, + "<|placeholder|>" * (video_grid_thw[index].prod() // merge_length), + 1, + ) + index += 1 + text[i] = text[i].replace("<|placeholder|>", self.video_token) + + text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"]) + + return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}) + + def batch_decode(self, *args, **kwargs): + """ + This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please + refer to the docstring of this method for more information. + """ + return self.tokenizer.batch_decode(*args, **kwargs) + + def decode(self, *args, **kwargs): + """ + This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to + the docstring of this method for more information. + """ + return self.tokenizer.decode(*args, **kwargs) + + def post_process_image_text_to_text( + self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs + ): + """ + Post-process the output of the model to decode the text. + + Args: + generated_outputs (`torch.Tensor` or `np.ndarray`): + The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)` + or `(sequence_length,)`. + skip_special_tokens (`bool`, *optional*, defaults to `True`): + Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method. + Clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): + Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method. + **kwargs: + Additional arguments to be passed to the tokenizer's `batch_decode method`. + + Returns: + `List[str]`: The decoded text. 
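+        Example (illustrative sketch, not an official usage snippet; `model`, `processor`, and
+        `inputs` are assumed to be an already-loaded Qwen2.5-VL model, this processor, and the
+        `BatchFeature` returned by `processor(...)`):
+
+            generated_ids = model.generate(**inputs, max_new_tokens=128)
+            # Drop the prompt tokens so only the newly generated continuation is decoded.
+            trimmed = [out[len(inp):] for inp, out in zip(inputs["input_ids"], generated_ids)]
+            texts = processor.post_process_image_text_to_text(trimmed)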
+ """ + return self.tokenizer.batch_decode( + generated_outputs, + skip_special_tokens=skip_special_tokens, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + **kwargs, + ) + + @property + def model_input_names(self): + tokenizer_input_names = self.tokenizer.model_input_names + image_processor_input_names = self.image_processor.model_input_names + names_from_processor = list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + return names_from_processor + ["second_per_grid_ts"] + + +__all__ = ["Qwen2_5_VLProcessor"] \ No newline at end of file diff --git a/vlm_fo1/model/multimodal_encoder/qwen2_5_vl_encoder.py b/vlm_fo1/model/multimodal_encoder/qwen2_5_vl_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..664b3c778c63ff9995087b4bbbe1a7e0ed6dcde2 --- /dev/null +++ b/vlm_fo1/model/multimodal_encoder/qwen2_5_vl_encoder.py @@ -0,0 +1,301 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from vlm_fo1.model.multimodal_encoder.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2_5_VisionTransformerPretrainedModel +from transformers.models.qwen2_vl.image_processing_qwen2_vl import Qwen2VLImageProcessor +from torchvision.transforms import ToPILImage + +class VisionFeaturesGather: + """ + Collects and manages intermediate features for multi-level visual representation extraction + (used for region feature/ROIAlign task). Each forward pass (per image) builds up a list of features. + """ + def __init__(self) -> None: + self.features_list = [] + self.grid_thw = None + self.window_index = None + self.merge_size = None + + def reset(self): + """Clear all states before starting a new feature-gathering process.""" + self.features_list.clear() + self.grid_thw = None + self.window_index = None + self.merge_size = None + + def set_params(self, grid_thw, window_index, merge_size): + """Store spatial and merge information for the current image or batch.""" + self.grid_thw = grid_thw + self.window_index = window_index + self.merge_size = merge_size + + def append(self, element): + """Append a set of features (typically per layer in encoder).""" + self.features_list.append(element) + + def extract_multi_level_features(self): + """ + Assemble all gathered multi-level features into canonical tensor forms. + + The goal: for each visual sample, produce a list of region-aligned feature maps + (e.g., multiple stage outputs for downstream region patching/ROIAlign). + + Returns: + List of features, where each element is a list [stage1, stage2, ...] for one image. 
+ """ + # Concatenate all feature tensors along hidden dimension: [seq_len, hidden_size * k] + concat_features = torch.cat(self.features_list, dim=1) + merge_unit = self.merge_size * self.merge_size + seq_len = concat_features.shape[0] + + # Rearrange into [windows, merge_unit, hidden_dim*layers] + concat_features = concat_features.reshape(seq_len // merge_unit, merge_unit, -1) + reverse_indices = torch.argsort(self.window_index) + concat_features = concat_features[reverse_indices, :, :] + concat_features = concat_features.reshape(seq_len, -1) + + # Split features for each image/video by product of grid h and w (per sample) + split_size = (self.grid_thw[:, 1] * self.grid_thw[:, 2]).tolist() + split_features = list(torch.split(concat_features, split_size, dim=0)) + assert len(split_features) == self.grid_thw.shape[0] + for i in range(len(split_features)): + # Recover original grid shape and merge windowing into stages, then split + _, grid_h, grid_w = self.grid_thw[i] + merge_h = grid_h // self.merge_size + merge_w = grid_w // self.merge_size + split_features[i] = split_features[i].reshape(merge_h, merge_w, merge_unit, -1) + split_features[i] = split_features[i].reshape(merge_h, merge_w, self.merge_size, self.merge_size, -1) + split_features[i] = split_features[i].permute(0, 2, 1, 3, 4) + split_features[i] = split_features[i].flatten(start_dim=0, end_dim=-2) + # Split [h, w, dim] into k tensors [1, dim/k, h, w] (for compatibility with multi-stage vision encoding) + hidden_dim = split_features[i].shape[-1] + split_dim = hidden_dim // len(self.features_list) + split_features[i] = split_features[i].reshape(grid_h, grid_w, -1) + split_features[i] = [ + split_features[i][..., j*split_dim:(j+1)*split_dim].permute(2, 0, 1).unsqueeze(0) + for j in range(len(self.features_list)) + ] + + return split_features + +# Global gather object to pass into Qwen2_5_VisionTransformer for monkey-patched feature gathering +GATHER = VisionFeaturesGather() + +# --------------------------------- Monkey Patch --------------------------------------- +def custom_forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor) -> torch.Tensor: + """ + Custom forward used with monkey patch to support multi-level feature extraction. + Applies patch embedding, window partition, position embedding, and passes through all blocks. + Optionally collects features at each 'fullatt' block for multi-region support. + + Args: + hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`): + The final hidden states of the model. + grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`): + Temporal, height, width of each feature sequence. + + Returns: + `torch.Tensor`: Final hidden states after MLP head (merger). 
+ """ + hidden_states = self.patch_embed(hidden_states) + rotary_pos_emb = self.rot_pos_emb(grid_thw) + window_index, cu_window_seqlens = self.get_window_index(grid_thw) + cu_window_seqlens = torch.tensor( + cu_window_seqlens, + device=hidden_states.device, + dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32, + ) + cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens) + + seq_len, _ = hidden_states.size() + hidden_states = hidden_states.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1) + hidden_states = hidden_states[window_index, :, :] + hidden_states = hidden_states.reshape(seq_len, -1) + rotary_pos_emb = rotary_pos_emb.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1) + rotary_pos_emb = rotary_pos_emb[window_index, :, :] + rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1) + emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1) + position_embeddings = (emb.cos(), emb.sin()) + + cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum( + dim=0, + # FA2 requires that cu_seqlens_q must have dtype int32 + # torch.onnx.export requires that cu_seqlens_q must match grid_thw dtype + # See https://github.com/huggingface/transformers/pull/34852 for more info + dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32, + ) + cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0) + + # If monkey-patched feature gather enabled, prepare to collect intermediate features + if hasattr(self, 'vision_features_gather'): + self.vision_features_gather.reset() + self.vision_features_gather.set_params(grid_thw, window_index, self.spatial_merge_size) + + # Forward pass through all transformer blocks; collect intermediate features if needed + for layer_num, blk in enumerate(self.blocks): + if layer_num in self.fullatt_block_indexes: + cu_seqlens_now = cu_seqlens + else: + cu_seqlens_now = cu_window_seqlens + if self.gradient_checkpointing and self.training: + hidden_states = self._gradient_checkpointing_func( + blk.__call__, hidden_states, cu_seqlens_now, None, position_embeddings, use_reentrant=False + ) + else: + hidden_states = blk(hidden_states, cu_seqlens=cu_seqlens_now, position_embeddings=position_embeddings) + + if hasattr(self, 'vision_features_gather'): + # Capture hidden states at all 'full attention' blocks as multi-level features + if layer_num in self.fullatt_block_indexes: + # This property is set by monkey patching + self.vision_features_gather.append(hidden_states.clone()) + + hidden_states = self.merger(hidden_states) + reverse_indices = torch.argsort(window_index) + hidden_states = hidden_states[reverse_indices, :] + + return hidden_states + +def init_vision_features_gather(self, vision_features_gather): + """ + Helper method for monkey patch to inject a VisionFeaturesGather instance into model. + """ + self.vision_features_gather = vision_features_gather + +def replace_qwen_vit_forward(): + """ + Monkey-patch Qwen2_5_VisionTransformer to use custom forward with multi-level feature support. + """ + Qwen2_5_VisionTransformerPretrainedModel.forward = custom_forward + Qwen2_5_VisionTransformerPretrainedModel.init_vision_features_gather = init_vision_features_gather + + +class Qwen2_5_VlVisionTower(nn.Module): + """ + Vision backbone wrapper for Qwen2.5-VL (Vision Transformer). + Handles both standard and region-level (multi-level) encoding with optional monkey patch logic. 
+ """ + def __init__(self, image_tower, args, delay_load=False, min_pixels=56*56, max_pixels=2048*2048): + super().__init__() + + self.is_loaded = False + + self.image_tower_name = image_tower + + # Determine if multi-level region feature is to be enabled (monkey patch required) + self.use_vision_tower_region_feature = getattr(args, 'mm_use_vision_tower_region_feature', False) + if self.use_vision_tower_region_feature: + replace_qwen_vit_forward() # Monkey patch: add multi-level feature extraction logic + self.min_pixels = min_pixels + self.max_pixels = max_pixels + self.delay_load = delay_load + print (f"Qwen2_5_VlVisionTower loading_info: delay_load: {delay_load} min_pixels: {min_pixels} max_pixels: {max_pixels}") + + # if not delay_load: + # self.load_model() + # else: + # # Defer actual model loading to support (e.g.) model parallel or delayed download scenarios + # self.cfg_only = args.vision_config + self.cfg_only = args.vision_config + self.load_model(model_path=args.name_or_path) + + def load_model(self, model_path=None, image_size=336, is_train=True): + """ + Actually load Qwen2.5 Vision Tower backbone and processor. + Sets up the image tower and patch feed pipeline. + """ + self.image_tower = Qwen2_5_VisionTransformerPretrainedModel._from_config(self.cfg_only, attn_implementation="flash_attention_2", torch_dtype=torch.bfloat16) + # print(f'Qwen2_5_VlVisionTower loading_info: {loading_info}') + + if model_path is not None: + self.image_processor = Qwen2VLImageProcessor.from_pretrained(model_path, min_pixels=self.min_pixels, max_pixels=self.max_pixels) + else: + self.image_processor = Qwen2VLImageProcessor.from_pretrained(self.image_tower_name, min_pixels=self.min_pixels, max_pixels=self.max_pixels) + + if self.use_vision_tower_region_feature: + # Setup gather instance for monkey-patched feature extraction + self.image_tower.init_vision_features_gather(GATHER) + self.is_loaded = True + + def convert_image_format(self, image): + """ + Convert raw image tensor to pre-processed model input tensor and grid shape, using appropriate processor. + Handles PIL conversion and applies preprocessor for Qwen2.5-VL. + """ + pil_image = ToPILImage()(image) + inputs = self.image_processor(images=pil_image, videos=None, return_tensors="pt") + return inputs['pixel_values'], inputs['image_grid_thw'] + + def forward(self, images, image_grid_thws=[]): + """ + Forward pass for a batch (list) of images. + Returns image features, gridTHWs, and optional multi-level features for each input image. 
+ """ + if type(images) is list: + image_features = [] + multi_level_features_list = [] + output_image_grid_thws = [] + + for i, image in enumerate(images): + # If no grid provided, convert and infer via processor + if image_grid_thws is None or len(image_grid_thws) == 0: + image, image_grid_thw = self.convert_image_format(image=image) + else: + image_grid_thw = image_grid_thws[i] + image_forward_out = self.image_tower(image.to(device=self.device, dtype=self.dtype), grid_thw=image_grid_thw.to(device=self.device)) + image_feature = image_forward_out.unsqueeze(0).to(self.dtype) + + image_features.append(image_feature) + output_image_grid_thws.append(image_grid_thw) + + # If region feature mode enabled, collect multi-level features for this image + if self.use_vision_tower_region_feature: + multi_level_features_list.append(self.get_multi_level_features()[0]) + + else: + raise NotImplementedError("Qwen2_5_VlVisionTower only supports list-of-image input") + + return image_features, output_image_grid_thws, multi_level_features_list + + def get_multi_level_features(self): + """ + Get the current (last-processed) multi-level region features from the VisionFeaturesGather helper. + Used in region-feature/ROIAlign branches. + """ + multi_level_features = self.image_tower.vision_features_gather.extract_multi_level_features() + return multi_level_features + + @property + def dummy_feature(self): + """Returns a zero-vector feature, for use as fallback/null visual token.""" + return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype) + + @property + def dtype(self): + """Report vision tower's expected/active tensor dtype (inferred from real weights).""" + return self.image_tower.dtype + + @property + def device(self): + """Report vision tower's tensor device (cuda/cpu) for autoflow/compatibility.""" + return self.image_tower.device + + @property + def config(self): + """Yield config, for both loaded-and-ready and 'config only' modes (delay load etc).""" + if self.is_loaded: + return self.image_tower.config + else: + return self.cfg_only + + @property + def hidden_size(self): + """Return backbone output hidden size (for proj or post-processing modules).""" + return self.config.out_hidden_size + + @property + def num_patches(self): + """Return number of vision tokens (patches) in processed image.""" + return (self.config.image_size // self.config.patch_size) ** 2 + diff --git a/vlm_fo1/model/multimodal_projector/builder.py b/vlm_fo1/model/multimodal_projector/builder.py new file mode 100755 index 0000000000000000000000000000000000000000..0d0b271480c89d6b6c63d03533c9fe0f36ddfa44 --- /dev/null +++ b/vlm_fo1/model/multimodal_projector/builder.py @@ -0,0 +1,222 @@ +import torch +import torch.nn as nn +import re +from .honeybee import CAbstractor +from functools import partial +import numpy as np +from torch.nn.init import trunc_normal_ +from torch.nn import functional as F +import math + + +class IdentityMap(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x, *args, **kwargs): + return x + + @property + def config(self): + return {"mm_projector_type": 'identity'} + + +class SimpleResBlock(nn.Module): + def __init__(self, channels): + super().__init__() + self.pre_norm = nn.LayerNorm(channels) + + self.proj = nn.Sequential( + nn.Linear(channels, channels), + nn.GELU(), + nn.Linear(channels, channels) + ) + def forward(self, x): + x = self.pre_norm(x) + return x + self.proj(x) + + +def build_vision_projector(config, delay_load=False, **kwargs): + projector_type = 
getattr(config, 'mm_projector_type', 'linear') + + if projector_type == 'linear': + return nn.Linear(config.mm_hidden_size, config.hidden_size) + if projector_type == "cabstract": + n_query = getattr(config, 'mm_projector_n_query', None) + image_size = getattr(config, 'image_size', None) + if not n_query: + n_query = kwargs.get("mm_projector_n_query",144) + if not image_size: + image_size = kwargs.get("image_size",336) + vokens = int(image_size/14*image_size/14) + print ("n_query",n_query) + print ("image_size",image_size) + print ("vokens",vokens) + + return CAbstractor(vokens, config.mm_hidden_size, config.hidden_size, num_queries=n_query) + + if projector_type == "tokenpacker": + #TokenPacker(hidden_size=config.hidden_size, scale_factor=config.scale_factor) + image_size = kwargs.get("image_size",448) + return TokenPacker(hidden_size=config.hidden_size, mm_hidden_size=config.mm_hidden_size, raw_grid=int(image_size/14)) + + + mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type) + if mlp_gelu_match: + mlp_depth = int(mlp_gelu_match.group(1)) + modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)] + for _ in range(1, mlp_depth): + modules.append(nn.GELU()) + modules.append(nn.Linear(config.hidden_size, config.hidden_size)) + return nn.Sequential(*modules) + + if projector_type == 'identity': + return IdentityMap() + + raise ValueError(f'Unknown projector type: {projector_type}') + +def build_vision_projector_aux(config, delay_load=False, **kwargs): + projector_type = getattr(config, 'mm_projector_aux_type', 'linear') + + if projector_type == 'linear': + return nn.Linear(config.mm_region_hidden_size, config.hidden_size) + if projector_type == "cabstract": + n_query = getattr(config, 'mm_projector_n_query', None) + image_size = getattr(config, 'image_size', None) + if not n_query: + n_query = kwargs.get("mm_projector_n_query",144) + if not image_size: + image_size = kwargs.get("image_size",336) + vokens = int(image_size/14*image_size/14) + print ("n_query",n_query) + print ("image_size",image_size) + print ("vokens",vokens) + + return CAbstractor(vokens, config.mm_region_hidden_size, config.hidden_size, num_queries=n_query) + + if projector_type == "tokenpacker": + #TokenPacker(hidden_size=config.hidden_size, scale_factor=config.scale_factor) + image_size = kwargs.get("image_size",448) + return TokenPacker(hidden_size=config.hidden_size, mm_hidden_size=config.mm_region_hidden_size, raw_grid=int(image_size/14)) + + + mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type) + if mlp_gelu_match: + mlp_depth = int(mlp_gelu_match.group(1)) + modules = [nn.Linear(config.mm_region_hidden_size, config.hidden_size)] + for _ in range(1, mlp_depth): + modules.append(nn.GELU()) + modules.append(nn.Linear(config.hidden_size, config.hidden_size)) + return nn.Sequential(*modules) + + if projector_type == 'identity': + return IdentityMap() + + raise ValueError(f'Unknown projector type: {projector_type}') + +class TokenPacker(nn.Module): + def __init__( + self, + raw_grid=32, + embed_dim=1024, + num_heads=1024//128, + hidden_size=4096, + mm_hidden_size=3200, + scale_factor=2, + norm_layer=partial(nn.LayerNorm, eps=1e-6) + ): + super().__init__() + if raw_grid%scale_factor!=0: + raise ValueError("scale_factor must be divisible by grid size") + self.raw_grid = raw_grid + self.grid_size = raw_grid//scale_factor + self.num_queries = self.grid_size ** 2 + self.embed_dim = embed_dim + self.num_heads = num_heads + self.scale_factor = scale_factor + kv_dim = mm_hidden_size + self.q_proj_1 = 
nn.Linear(kv_dim, embed_dim, bias=False) + + k_modules = [nn.Linear(mm_hidden_size*4, 1024)] + for _ in range(1,2): + k_modules.append(nn.GELU()) + k_modules.append(nn.Linear(1024, 1024)) + self.k_proj_1 = nn.Sequential(*k_modules) + + v_modules = [nn.Linear(mm_hidden_size*4, 1024)] + for _ in range(1,2): + v_modules.append(nn.GELU()) + v_modules.append(nn.Linear(1024, 1024)) + self.v_proj_1 = nn.Sequential(*v_modules) + + self.ln_q_1 = norm_layer(embed_dim) + self.ln_k_1 = norm_layer(embed_dim) + self.ln_v_1 = norm_layer(embed_dim) + + self.clip_attn = nn.MultiheadAttention(embed_dim, num_heads) + + modules = [nn.Linear(1024, hidden_size)] + for _ in range(1, 2): + modules.append(nn.GELU()) + modules.append(nn.Linear(hidden_size, hidden_size)) + self.mlp = nn.Sequential(*modules) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def divide_feature(self, x, kernel_size, token_num, N, c): + h = w = int(token_num**0.5) + + #print (x.shape) + reshape_x = x.reshape(h, w, N, c).reshape(h//kernel_size, kernel_size, w, N, c) + reshape_x = reshape_x.permute(0,2,1,3,4) + reshape_x = reshape_x.reshape(h//kernel_size, w//kernel_size, kernel_size, kernel_size, N, c) + reshape_x = reshape_x.permute(0,1,3,2,4,5).reshape(h//kernel_size, w//kernel_size, kernel_size*kernel_size, N, c) + reshape_x = reshape_x.permute(2,0,1,3,4).reshape(kernel_size*kernel_size, -1, c) + + return reshape_x + + def forward(self, x, attn_mask=None): + + x_multi = x[1] # mulit-level + x = x[0] # original single-level + + key = self.ln_k_1(self.k_proj_1(x_multi)).permute(1, 0, 2) + value = self.ln_v_1(self.v_proj_1(x_multi)).permute(1, 0, 2) + + token_num, N, c = key.shape + + q = F.interpolate(x.reshape(x.shape[0],self.raw_grid,self.raw_grid,-1).float().permute(0,3,1,2), size=(self.grid_size, self.grid_size), mode='bilinear').permute(0,2,3,1) ## fix + q = q.reshape(q.shape[0], -1, q.shape[-1]).to(x.dtype) + + query = self.ln_q_1(self.q_proj_1(q)).permute(1, 0, 2) + + reshape_query = self.divide_feature(query, 1, self.num_queries, N, c) + reshape_key = self.divide_feature(key, self.scale_factor, token_num, N, c) + reshape_value = self.divide_feature(value, self.scale_factor, token_num, N, value.shape[-1]) + + out = self.clip_attn( + reshape_query, + reshape_key, + reshape_value, + attn_mask=attn_mask)[0] + + x = out + x = x.reshape(self.num_queries, N, -1) + x = x.permute(1, 0, 2) + + x = self.mlp(x) + return x + + def _repeat(self, query, N: int): + return query.unsqueeze(1).repeat(1, N, 1) + + diff --git a/vlm_fo1/model/multimodal_projector/honeybee.py b/vlm_fo1/model/multimodal_projector/honeybee.py new file mode 100644 index 0000000000000000000000000000000000000000..dea1f689ccf29c7024ea9b14edca60d847b636de --- /dev/null +++ b/vlm_fo1/model/multimodal_projector/honeybee.py @@ -0,0 +1,142 @@ +from functools import partial +import torch +import torch.nn as nn +from einops import rearrange +from timm.layers import LayerNorm, LayerNorm2d +from timm.models.regnet import RegStage + + +def build_pos_embeds( + pos_emb: bool, num_input_tokens: int, vision_hidden_size: int +): + # pos emb + if pos_emb: + pos_emb = torch.nn.Parameter(torch.zeros(1, num_input_tokens, vision_hidden_size)) + nn.init.trunc_normal_(pos_emb, mean=0.0, std=0.02) + else: + pos_emb = None + + 
return pos_emb + +def build_prenorm(prenorm, encoder_hidden_size): + if prenorm: + prenorm = LayerNorm(encoder_hidden_size) + else: + prenorm = None + return prenorm + + +def build_mlp(depth, hidden_size, output_hidden_size): + layers = [nn.Linear(hidden_size, output_hidden_size)] + for _ in range(1, depth): + layers.append(nn.SiLU()) + layers.append(nn.Linear(output_hidden_size, output_hidden_size)) + return nn.Sequential(*layers) + + +class CAbstractor(nn.Module): + """Base projector class""" + + def __init__( + self, + num_input_tokens: int, + encoder_hidden_size: int, + output_hidden_size: int, + hidden_size: int = 1024, + depth: int = 3, + mlp_depth: int = 2, + num_queries: int = 144, + pos_emb: bool = True, + prenorm: bool = False + ): + super().__init__() + self.num_input_tokens = num_input_tokens + self.encoder_hidden_size = encoder_hidden_size + self.output_hidden_size = output_hidden_size + self.mlp_depth = mlp_depth + self.depth = depth + self.num_queries = num_queries + self.hidden_size = hidden_size + + # pos emb + self.pos_emb = build_pos_embeds(pos_emb, num_input_tokens, encoder_hidden_size) + + self.prenorm = build_prenorm(prenorm, encoder_hidden_size) + + self.build_net() + + def build_net(self): + encoder_hidden_size = self.encoder_hidden_size + hidden_size = self.hidden_size + output_hidden_size = self.output_hidden_size + depth = self.depth + mlp_depth = self.mlp_depth + n_queries = self.num_queries + + assert (n_queries ** 0.5).is_integer(), "n_queries must be square number" + hw = int(n_queries ** 0.5) + + # RegBlock = ResBlock + SE + RegBlock = partial( + RegStage, + stride=1, + dilation=1, + act_layer=nn.SiLU, + norm_layer=LayerNorm2d, + ) + + s1 = RegBlock( + depth, + encoder_hidden_size, + hidden_size, + ) + sampler = nn.AdaptiveAvgPool2d((hw, hw)) + s2 = RegBlock( + depth, + hidden_size, + hidden_size, + ) + + self.net = nn.Sequential(s1, sampler, s2) + self.readout = build_mlp(mlp_depth, hidden_size, output_hidden_size) + + def _forward(self, x): + # x: [B, L, dim] + # x = x[:, 1:] # drop cls token and 2d forward @Kyusong, If we output CLS token from vision tower, u can use this + hw = int(x.size(1) ** 0.5) + x = rearrange(x, "b (h w) d -> b d h w", h=hw, w=hw) + x = self.net(x) + x = rearrange(x, "b d h w -> b (h w) d") + x = self.readout(x) + + return x + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Args: + x: (B, L, encoder_hidden_size) tensor from the visual backbone (CLIP visual encoder), including cls token. 
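+        Example (shape sketch, analogous to the self-test at the bottom of this file; L must be a
+        perfect square):
+
+            import torch
+            m = CAbstractor(num_input_tokens=576, encoder_hidden_size=1024,
+                            output_hidden_size=4096, num_queries=144)
+            y = m(torch.randn(2, 576, 1024))   # -> (2, 144, 4096)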
+ """ + if self.prenorm is not None: + x = self.prenorm(x) + + if self.pos_emb is not None: + x += self.pos_emb + + x = self._forward(x) # (B, L, output_hidden_size) + + return x + + + +if __name__ == "__main__": + B = 2 # batch size + L = 576 # number of input token + H = 1024 # hidden size + + n_query = 256 + output_h = 4096 + + x = torch.FloatTensor(B, L, H) + m = CAbstractor(L, H, output_h, num_queries=n_query) + y = m(x) + print(y.shape) # B, N_Query, output_H diff --git a/vlm_fo1/model/multimodal_visual_prompt_encoder/hybrid_finegrained_region_encoder.py b/vlm_fo1/model/multimodal_visual_prompt_encoder/hybrid_finegrained_region_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..753cf8fbd85530b6efe42c3c3dc9012a518eb658 --- /dev/null +++ b/vlm_fo1/model/multimodal_visual_prompt_encoder/hybrid_finegrained_region_encoder.py @@ -0,0 +1,469 @@ +import torch +import torch.nn as nn +from typing import List, Union +import torch.nn.functional as F +from torchvision.ops import roi_align +import math + +from vlm_fo1.model.multimodal_visual_prompt_encoder.simple_fpn import SimpleFP + + +def generate_2d_position_embedding(height, width, dim, device): + """Generate a 2D positional encoding for a feature map. + + Args: + height (int): Height of the feature map. + width (int): Width of the feature map. + dim (int): Dimensionality of the positional embedding (should match channel count). + device: Torch device on which to allocate tensors. + + Returns: + pos_embed (Tensor): Positional encoding of shape [H, W, dim]. + """ + # Generate grid coordinate vectors of length H and W + y_pos = torch.arange(height, dtype=torch.float32, device=device) + x_pos = torch.arange(width, dtype=torch.float32, device=device) + + # Normalize grid values to [0, 1] + y_pos = y_pos / height + x_pos = x_pos / width + + # Create mesh grid (Y: rows, X: cols) + y_grid, x_grid = torch.meshgrid(y_pos, x_pos, indexing='ij') + + scale = 2 * math.pi + # Calculate positions for sine/cosine encoding + quarter_dim = dim // 4 + dim_t = torch.arange(quarter_dim, dtype=torch.float32, device=device) + dim_t = 10000 ** (2 * (dim_t // 2) / quarter_dim) if quarter_dim > 0 else torch.tensor([1.0], device=device) + + # X direction encoding + x_embed = x_grid.unsqueeze(-1) * scale # [H, W, 1] + pos_x = x_embed / dim_t # [H, W, quarter_dim] + pos_x = torch.stack((pos_x.sin(), pos_x.cos()), dim=-1).flatten(-2) # Alternating sin/cos + + # Y direction encoding + y_embed = y_grid.unsqueeze(-1) * scale # [H, W, 1] + pos_y = y_embed / dim_t # [H, W, quarter_dim] + pos_y = torch.stack((pos_y.sin(), pos_y.cos()), dim=-1).flatten(-2) # Alternating sin/cos + + # Concatenate along the last dimension to make [H, W, dim] + pos_embed = torch.cat([pos_y, pos_x], dim=-1) + + return pos_embed + +def gen_sineembed_for_position(pos_tensor, dim_of_pos_feats): + """Generate sine/cosine positional embedding for ROI position(s). + + Args: + pos_tensor (Tensor): Shape [batch_size, N, 4] (format: [cx, cy, w, h] in normalized [0, 1]) + dim_of_pos_feats (int): Output embedding dimensionality (#positional channels). 
+ + Returns: + pos (Tensor): [batch_size, N, dim_of_pos_feats * (2, 4, ...)] + """ + scale = 2 * math.pi + dim_t = torch.arange( + dim_of_pos_feats, dtype=torch.float32, device=pos_tensor.device + ) + dim_t = 10000 ** (2 * (dim_t // 2) / dim_of_pos_feats) + x_embed = pos_tensor[:, :, 0] * scale + y_embed = pos_tensor[:, :, 1] * scale + + # Generate encodings for cx, cy + pos_x = x_embed[:, :, None] / dim_t + pos_y = y_embed[:, :, None] / dim_t + pos_x = torch.stack( + (pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3 + ).flatten(2) + pos_y = torch.stack( + (pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3 + ).flatten(2) + if pos_tensor.size(-1) == 2: + # [cx, cy] input + pos = torch.cat((pos_y, pos_x), dim=2) + elif pos_tensor.size(-1) == 4: + # [cx, cy, w, h] input + w_embed = pos_tensor[:, :, 2] * scale + pos_w = w_embed[:, :, None] / dim_t + pos_w = torch.stack( + (pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3 + ).flatten(2) + + h_embed = pos_tensor[:, :, 3] * scale + pos_h = h_embed[:, :, None] / dim_t + pos_h = torch.stack( + (pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3 + ).flatten(2) + + # Concatenate encodings for [cy, cx, w, h] + pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2) + else: + raise ValueError("Unknown pos_tensor shape(-1):{}".format(pos_tensor.size(-1))) + return pos + + +class HFREModule(nn.Module): + """Hybrid Finegrained Region Encoder (HFREModule). + + Handles multi-level ROI region features, optional position embedding, and feature combination for hybrid visual prompt encoding. + + Args: + roi_output_size (Optional[int]): Output spatial size for ROIAlign. + region_feature_dim (int): The output dimension for region features. + apply_position_embedding (bool): Whether positional embedding is used in region features. + pos_embedding_strategy (str): 'bbox_based', 'feature_map_based', or 'hybrid'. + use_vt_region_feature_only (bool): Only use vision tower features, skip auxiliary. + use_vision_tower_region_feature (bool): Whether to include vision tower region features. + region_feature_combination (str): Combination method: 'concat', 'mean', etc. + use_separate_mlp_for_regions (bool): Whether to MLP project each region type separately. + apply_region_layer_norm (bool): Whether to apply layernorm to region features. + vision_tower_region_feature_dim (int): #channels for vision-tower region feature. + vision_tower_spatial_scale (float): Spatial scale for vision-tower (for roi_align). + use_simpleFPN_for_vt (bool): Whether to use FPN on the vision-tower output. + aux_vision_tower_region_feature_dims (List[int]): Channel dimensions of auxiliary features list. + aux_vision_tower_spatial_scale (float): Spatial scale for auxiliary vision-tower features. 
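+    Example (construction sketch; the numeric values below mirror the wiring in omchat_arch.py but
+    are checkpoint-dependent, so treat them as placeholders):
+
+        encoder = HFREModule(
+            roi_output_size=7,
+            use_vision_tower_region_feature=True,
+            vision_tower_region_feature_dim=1280 * 4,   # e.g. four gathered ViT layers, concatenated
+            vision_tower_spatial_scale=1 / 14,
+            aux_vision_tower_region_feature_dims=[256, 512, 1024, 2048],
+            aux_vision_tower_spatial_scale=0.25,
+        )
+        # aux_feats / vt_feats: lists of [B, C, H, W] maps; boxes: float [N, 4] xyxy tensors in a list
+        region_feats = encoder(aux_feats, [aux_boxes], vt_feats, [vt_boxes])   # -> (1, N, C)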
+ """ + + def __init__( + self, + roi_output_size: int = None, # Output spatial size for ROI region features + region_feature_dim: int = 1024, # Output dimension for final region feature + apply_position_embedding: bool = False, # Whether to apply position embedding + pos_embedding_strategy: str = 'bbox_based', # Strategy: 'bbox_based', 'feature_map_based', 'hybrid' + use_vt_region_feature_only: bool = False, # Use vision tower (VT) region features only + use_vision_tower_region_feature: bool = False,# Use vision tower region features (with others) + region_feature_combination: str = 'concat', # How to combine aux and vt region features + use_separate_mlp_for_regions: bool = False, # MLP-per-region + apply_region_layer_norm: bool = False, # Apply LayerNorm + + # Primary vision tower related + vision_tower_region_feature_dim: int = 5120, # Dim of the VT region feature + vision_tower_spatial_scale: float = 1/14, # Spatial scale of the VT for roi_align + use_simpleFPN_for_vt: bool = False, # Use simpleFPN for vision tower + + # Auxiliary vision tower related + aux_vision_tower_region_feature_dims: List[int] = [256, 512, 1024, 2048], + aux_vision_tower_spatial_scale: float = None, # Scale for aux VT + ): + super(HFREModule, self).__init__() + self.roi_output_size = roi_output_size + self.region_feature_dim = region_feature_dim + self.apply_position_embedding = apply_position_embedding + self.pos_embedding_strategy = pos_embedding_strategy + self.use_vt_region_feature_only = use_vt_region_feature_only + self.use_vision_tower_region_feature = use_vision_tower_region_feature + self.region_feature_combination = region_feature_combination + self.use_separate_mlp_for_regions = use_separate_mlp_for_regions + self.apply_region_layer_norm = apply_region_layer_norm + + self.vision_tower_region_feature_dim = vision_tower_region_feature_dim + self.vision_tower_spatial_scale = vision_tower_spatial_scale + self.use_simpleFPN_for_vt = use_simpleFPN_for_vt + + self.aux_vision_tower_region_feature_dims = aux_vision_tower_region_feature_dims + self.aux_vision_tower_spatial_scale = aux_vision_tower_spatial_scale + + # Print configuration for debugging + # print(f"output_size: {self.roi_output_size} use_vision_tower_region_feature: {self.use_vision_tower_region_feature} vision_tower_region_feature_dim: {self.vision_tower_region_feature_dim} " + # f"apply_position_embedding: {self.apply_position_embedding} region_feature_combination: {self.region_feature_combination} region_feature_dim: {self.region_feature_dim} use_vt_region_feature_only: {self.use_vt_region_feature_only} " + # f"use_simpleFPN_for_vt: {self.use_simpleFPN_for_vt} pos_embedding_strategy: {self.pos_embedding_strategy} " + # f"apply_region_layer_norm: {self.apply_region_layer_norm}") + + # Optional: FPN for the vision tower input if enabled + if self.use_simpleFPN_for_vt: + self.simple_fpn = SimpleFP(out_channels=512, norm="LN", square_pad=0, dim=1280, stride=14) + + # LayerNorm for auxiliary and VT region features, if enabled + if self.apply_region_layer_norm: + if self.use_vision_tower_region_feature: + self.vt_region_norm = nn.LayerNorm(self.vision_tower_region_feature_dim) + if not self.use_vt_region_feature_only: + self.aux_region_norm = nn.LayerNorm(sum(self.aux_vision_tower_region_feature_dims)) + + # Optionally, a projection MLP if using certain combination strategies + if self.use_vision_tower_region_feature and self.region_feature_combination in ['mean', 'mean_sep_pos', 'mean_aux_pos', 'mean_sep_no_vt_pos']: + 
self.vision_tower_region_feature_projector = nn.Sequential( + nn.Linear(vision_tower_region_feature_dim, region_feature_dim), + nn.GELU(), + nn.Linear(region_feature_dim, region_feature_dim) + ) + + # Two MLP heads if regions are projected separately (for concat mode) + if self.use_vision_tower_region_feature and self.use_separate_mlp_for_regions: + self.vt_region_mlp = nn.Sequential( + nn.Linear(2048, 1024), + nn.GELU(), + nn.Linear(1024, 1024) + ) + self.aux_region_mlp = nn.Sequential( + nn.Linear(2048, 1024), + nn.GELU(), + nn.Linear(1024, 1024) + ) + + def _apply_feature_map_position_embedding(self, features): + """Apply 2D position embedding to each feature map in a feature pyramid, if enabled. + + Args: + features (List[Tensor]): Each is [B, C, H, W] per FPN level. + + Returns: + List[Tensor]: Feature maps with position embedding applied, shape unchanged. + """ + enhanced_features = [] + for level_idx, feature in enumerate(features): + if self.apply_position_embedding and self.pos_embedding_strategy in ['feature_map_based', 'hybrid']: + B, C, H, W = feature.shape + + # Generate position embedding matching channel dimension + pos_embed = generate_2d_position_embedding( + H, W, C, feature.device + ) # [H, W, C] + + # Reshape to [1, C, H, W] and add + pos_embed = pos_embed.permute(2, 0, 1).unsqueeze(0) + feature = feature + pos_embed.to(feature.dtype) + enhanced_features.append(feature) + return enhanced_features + + def extract_vt_region_feature(self, multi_level_features, boxes: Union[torch.Tensor, List[torch.Tensor]]) -> torch.Tensor: + """Extract vision-tower region features via ROIAlign over FPN features, with spatial scaling. + + Args: + multi_level_features (List[Tensor]): Per-FPN level features, [B, C, H, W]. + boxes (Union[Tensor, List[Tensor]]): ROI bounding boxes for roi_align. + + Returns: + Tensor: [1, N, C=tower_channels] + """ + if self.use_simpleFPN_for_vt: + # If using FPN for vision tower: apply FPN and select fixed spatial scales (hardcoded stride) + multi_level_features = self.simple_fpn(multi_level_features) + roi_features_per_level = [] + # Hardcoded feature strides for each FPN stage; tweak if arch changes + feature_strides = [3.5, 7, 14, 28] + for level_idx, level_feature in enumerate(multi_level_features): + current_spatial_scale = 1.0 / feature_strides[level_idx] + level_roi_feat = roi_align( + level_feature.float(), + boxes, + output_size=self.roi_output_size, + spatial_scale=current_spatial_scale + ) + # Pool across H,W to get region feature per ROI + level_roi_feat = level_roi_feat.mean(dim=(2, 3)) + roi_features_per_level.append(level_roi_feat) + out_box_feat = torch.cat(roi_features_per_level, dim=1).unsqueeze(0) + else: + # If not using FPN: concatenate all feature levels on channel axis and ROI-align once + concat_multi_level_feature = [] + concat_multi_level_feature = torch.cat(multi_level_features, dim=1) + + out_box_feat = roi_align( + concat_multi_level_feature.float(), + boxes, + output_size=self.roi_output_size, + spatial_scale=self.vision_tower_spatial_scale, + ) + # Pool per ROI for (1, N, C_total) + out_box_feat = out_box_feat.mean(dim=(2, 3)).reshape( + 1, out_box_feat.shape[0], out_box_feat.shape[1] + ) + return out_box_feat + + def __call__( + self, + aux_multi_level_features: List[torch.Tensor], + aux_boxes: Union[torch.Tensor, List[torch.Tensor]], + vt_multi_level_features = None, + vt_boxes: Union[torch.Tensor, List[torch.Tensor]] = None, + ) -> torch.Tensor: + """Main forward. 
Extracts ROI region features with possible hybrid VT/aux, applies position embedding and combines as configured. + + Args: + aux_multi_level_features (List[Tensor]): Auxiliary vision features (e.g., from FPN, [B, C, H, W]). + aux_boxes (Union[Tensor, List[Tensor]]): ROIs in [N, 4] xyxy. + vt_multi_level_features (optional): Vision tower features. + vt_boxes (optional): Vision tower's box coordinates ([N, 4]). + + Returns: + Tensor: Region features of shape [1, N, C], N=#ROIs. + """ + if self.use_vt_region_feature_only: + # Only use VT region features (skip aux completely) + out_box_feat = self.extract_vt_region_feature(vt_multi_level_features, vt_boxes) + + if self.apply_position_embedding: + # Add position embedding to VT region feature + pos_boxes = vt_boxes[0] # (N, 4) + pos_boxes = pos_boxes.to(out_box_feat.dtype) + vt_max_height = max([feature.shape[-2] for feature in vt_multi_level_features]) + vt_max_width = max([feature.shape[-1] for feature in vt_multi_level_features]) + original_img_width = vt_max_width / self.vision_tower_spatial_scale + original_img_height = vt_max_height / self.vision_tower_spatial_scale + # Normalize box coordinates by image size + pos_boxes[:, [0, 2]] = pos_boxes[:, [0, 2]] / original_img_width + pos_boxes[:, [1, 3]] = pos_boxes[:, [1, 3]] / original_img_height + # Convert from (x1, y1, x2, y2) to (cx, cy, w, h) + pos_boxes[:, 2] = pos_boxes[:, 2] - pos_boxes[:, 0] + pos_boxes[:, 3] = pos_boxes[:, 3] - pos_boxes[:, 1] + pos_boxes[:, 0] = pos_boxes[:, 0] + pos_boxes[:, 2] / 2 + pos_boxes[:, 1] = pos_boxes[:, 1] + pos_boxes[:, 3] / 2 + # Add sine/cos position embedding + pos_embed = gen_sineembed_for_position(pos_boxes.unsqueeze(0), self.region_feature_dim // 4) + out_box_feat = out_box_feat + pos_embed + return out_box_feat + + # Otherwise: hybrid mode (aux + possibly VT region features) + aux_boxes[0] = aux_boxes[0].float() + + # Collect all auxiliary features at the same (max) spatial size for channel concat + concat_multi_level_feature = [] + max_height = max([feature.shape[2] for feature in aux_multi_level_features]) + max_width = max([feature.shape[3] for feature in aux_multi_level_features]) + + # Optionally apply 2D position encoding at the feature map level (before concat/roi_align) + if self.pos_embedding_strategy in ['feature_map_based', 'hybrid']: + # Option: compute stride info for each level for debugging/extension + feature_strides = [] + for feature in aux_multi_level_features: + stride = max_height / feature.shape[2] + feature_strides.append(stride) + aux_multi_level_features = self._apply_feature_map_position_embedding( + aux_multi_level_features + ) + + # Interpolate all features to (max_height,max_width), then concat along channel + for level, feature in enumerate(aux_multi_level_features): + if level != 0: + concat_multi_level_feature.append( + F.interpolate( + feature.float(), + size=(max_height, max_width), + mode="bilinear", + align_corners=False, + ) + ) + else: + concat_multi_level_feature.append(feature.float()) + concat_multi_level_feature = torch.cat(concat_multi_level_feature, dim=1) + + # Extract region feature for all boxes using roi_align + out_box_aux_feat = roi_align( + concat_multi_level_feature, + aux_boxes, + output_size=self.roi_output_size, + spatial_scale=self.aux_vision_tower_spatial_scale + ) + + # Pool H,W to get final shape (1, Nbox, C) + out_box_aux_feat = out_box_aux_feat.mean(dim=(2, 3)).reshape( + 1, out_box_aux_feat.shape[0], out_box_aux_feat.shape[1] + ) + + if self.apply_region_layer_norm: + out_box_aux_feat 
= self.aux_region_norm.float()(out_box_aux_feat) + + if self.use_vision_tower_region_feature: + # If also using vision-tower features + out_box_vt_feat = self.extract_vt_region_feature(vt_multi_level_features, vt_boxes) + if self.apply_region_layer_norm: + out_box_vt_feat = self.vt_region_norm.float()(out_box_vt_feat) + if self.region_feature_combination in ['mean', 'mean_aux_pos']: + # Combine by mean + out_box_feat = (out_box_aux_feat + out_box_vt_feat) / 2 + elif self.region_feature_combination in ['concat', 'concat_aux_pos']: + # Optionally MLP each before concat + if self.use_separate_mlp_for_regions: + original_vt_dtype = out_box_vt_feat.dtype + original_aux_dtype = out_box_aux_feat.dtype + out_box_vt_feat = self.vt_region_mlp(out_box_vt_feat.to(self.vt_region_mlp[0].weight.dtype)).to(original_vt_dtype) + out_box_aux_feat = self.aux_region_mlp(out_box_aux_feat.to(self.aux_region_mlp[0].weight.dtype)).to(original_aux_dtype) + out_box_feat = torch.cat([out_box_aux_feat, out_box_vt_feat], dim=-1) + elif self.region_feature_combination in ['concat_sep_pos', 'mean_sep_pos', 'concat_sep_no_vt_pos', 'mean_sep_no_vt_pos']: + # Compute position embedding separately for aux and vt features + # Use `aux_boxes` for aux and `vt_boxes` for vt + vt_dim = 5120 if self.region_feature_combination == 'concat_sep_pos' else 2880 + + # Aux region: positional embedding using aux_boxes + aux_pos_boxes = aux_boxes[0].to(out_box_aux_feat.dtype) # (N, 4) + aux_original_img_width = max_width / self.aux_vision_tower_spatial_scale + aux_original_img_height = max_height / self.aux_vision_tower_spatial_scale + + aux_pos_boxes[:, [0, 2]] = aux_pos_boxes[:, [0, 2]] / aux_original_img_width + aux_pos_boxes[:, [1, 3]] = aux_pos_boxes[:, [1, 3]] / aux_original_img_height + aux_pos_boxes[:, 2] = aux_pos_boxes[:, 2] - aux_pos_boxes[:, 0] + aux_pos_boxes[:, 3] = aux_pos_boxes[:, 3] - aux_pos_boxes[:, 1] + aux_pos_boxes[:, 0] = aux_pos_boxes[:, 0] + aux_pos_boxes[:, 2] / 2 + aux_pos_boxes[:, 1] = aux_pos_boxes[:, 1] + aux_pos_boxes[:, 3] / 2 + aux_pos_embed = gen_sineembed_for_position( + aux_pos_boxes.unsqueeze(0), 2880 // 4 + ) + out_box_aux_feat = out_box_aux_feat + aux_pos_embed + + # Only apply VT position embedding in these combos: + # For *_no_vt_pos: skip vt feature position embedding + if self.region_feature_combination in ['concat_sep_no_vt_pos', 'mean_sep_no_vt_pos']: + pass + else: + # VT region: positional embedding using vt_boxes + vt_pos_boxes = vt_boxes[0].to(out_box_vt_feat.dtype) # (N, 4) + vt_max_height = max([feature.shape[2] for feature in vt_multi_level_features]) + vt_max_width = max([feature.shape[3] for feature in vt_multi_level_features]) + vt_original_img_width = vt_max_width / self.vision_tower_spatial_scale + vt_original_img_height = vt_max_height / self.vision_tower_spatial_scale + + vt_pos_boxes[:, [0, 2]] = vt_pos_boxes[:, [0, 2]] / vt_original_img_width + vt_pos_boxes[:, [1, 3]] = vt_pos_boxes[:, [1, 3]] / vt_original_img_height + vt_pos_boxes[:, 2] = vt_pos_boxes[:, 2] - vt_pos_boxes[:, 0] + vt_pos_boxes[:, 3] = vt_pos_boxes[:, 3] - vt_pos_boxes[:, 1] + vt_pos_boxes[:, 0] = vt_pos_boxes[:, 0] + vt_pos_boxes[:, 2] / 2 + vt_pos_boxes[:, 1] = vt_pos_boxes[:, 1] + vt_pos_boxes[:, 3] / 2 + vt_pos_embed = gen_sineembed_for_position( + vt_pos_boxes.unsqueeze(0), vt_dim // 4 + ) + out_box_vt_feat = out_box_vt_feat + vt_pos_embed + + # Merge aux and vt region features (by cat or mean) + if self.region_feature_combination in ['concat_sep_pos', 'concat_sep_no_vt_pos']: + out_box_feat = 
torch.cat([out_box_aux_feat, out_box_vt_feat], dim=-1) + elif self.region_feature_combination in ['mean_sep_pos', 'mean_sep_no_vt_pos']: + out_box_feat = (out_box_aux_feat + out_box_vt_feat) / 2 + + + # If enabled: add single positional embedding (bbox-based, not separate for each region type) + if self.apply_position_embedding and self.region_feature_combination not in ['concat_sep_pos', 'mean_sep_pos', 'concat_sep_no_vt_pos', 'mean_sep_no_vt_pos']: + # Only apply if position embedding strategy matches + apply_bbox_pos_embed = (self.pos_embedding_strategy == 'bbox_based' or self.pos_embedding_strategy == 'hybrid') + + if apply_bbox_pos_embed: + # Use vt_boxes unless not enabled or configured otherwise + if self.use_vision_tower_region_feature and vt_boxes is not None and self.region_feature_combination not in ['concat_aux_pos', 'mean_aux_pos']: + pos_boxes = vt_boxes[0] # (N, 4) + vt_max_height = max([feature.shape[-2] for feature in vt_multi_level_features]) + vt_max_width = max([feature.shape[-1] for feature in vt_multi_level_features]) + vt_spatial_scale = self.vision_tower_spatial_scale + original_img_width = vt_max_width / vt_spatial_scale + original_img_height = vt_max_height / vt_spatial_scale + else: + max_width = max([feature.shape[3] for feature in aux_multi_level_features]) + max_height = max([feature.shape[2] for feature in aux_multi_level_features]) + pos_boxes = aux_boxes[0] # (N, 4) + original_img_width = max_width / self.aux_vision_tower_spatial_scale + original_img_height = max_height / self.aux_vision_tower_spatial_scale + + pos_boxes = pos_boxes.to(out_box_feat.dtype) + pos_boxes[:, [0, 2]] = pos_boxes[:, [0, 2]] / original_img_width + pos_boxes[:, [1, 3]] = pos_boxes[:, [1, 3]] / original_img_height + # Convert box to center format + pos_boxes[:, 2] = pos_boxes[:, 2] - pos_boxes[:, 0] + pos_boxes[:, 3] = pos_boxes[:, 3] - pos_boxes[:, 1] + pos_boxes[:, 0] = pos_boxes[:, 0] + pos_boxes[:, 2] / 2 + pos_boxes[:, 1] = pos_boxes[:, 1] + pos_boxes[:, 3] / 2 + pos_embed = gen_sineembed_for_position( + pos_boxes.unsqueeze(0), self.region_feature_dim // 4 + ) + out_box_feat = out_box_feat + pos_embed + + return out_box_feat \ No newline at end of file diff --git a/vlm_fo1/model/multimodal_visual_prompt_encoder/simple_fpn.py b/vlm_fo1/model/multimodal_visual_prompt_encoder/simple_fpn.py new file mode 100644 index 0000000000000000000000000000000000000000..d44b781fb72574fab4be51d6efab6d5b1cecfd69 --- /dev/null +++ b/vlm_fo1/model/multimodal_visual_prompt_encoder/simple_fpn.py @@ -0,0 +1,257 @@ +import math + +import torch +import torch.nn as nn +from torch.nn import functional as F +import warnings + + +class Conv2d(torch.nn.Conv2d): + """ + A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features. + """ + + def __init__(self, *args, **kwargs): + """ + Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`: + + Args: + norm (nn.Module, optional): a normalization layer + activation (callable(Tensor) -> Tensor): a callable activation function + + It assumes that norm layer is used before activation. + """ + norm = kwargs.pop("norm", None) + activation = kwargs.pop("activation", None) + super().__init__(*args, **kwargs) + + self.norm = norm + self.activation = activation + + def forward(self, x): + # torchscript does not support SyncBatchNorm yet + # https://github.com/pytorch/pytorch/issues/40507 + # and we skip these codes in torchscript since: + # 1. currently we only support torchscript in evaluation mode + # 2. 
features needed by exporting module to torchscript are added in PyTorch 1.6 or + # later version, `Conv2d` in these PyTorch versions has already supported empty inputs. + if not torch.jit.is_scripting(): + # Dynamo doesn't support context managers yet + is_dynamo_compiling = True + if not is_dynamo_compiling: + with warnings.catch_warnings(record=True): + if x.numel() == 0 and self.training: + # https://github.com/pytorch/pytorch/issues/12013 + assert not isinstance( + self.norm, torch.nn.SyncBatchNorm + ), "SyncBatchNorm does not support empty inputs!" + + x = F.conv2d( + x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups + ) + if self.norm is not None: + x = self.norm(x) + if self.activation is not None: + x = self.activation(x) + return x + +class LayerNorm(nn.Module): + """ + A LayerNorm variant, popularized by Transformers, that performs point-wise mean and + variance normalization over the channel dimension for inputs that have shape + (batch_size, channels, height, width). + https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa B950 + """ + + def __init__(self, normalized_shape, eps=1e-6): + super().__init__() + self.weight = nn.Parameter(torch.ones(normalized_shape)) + self.bias = nn.Parameter(torch.zeros(normalized_shape)) + self.eps = eps + self.normalized_shape = (normalized_shape,) + + def forward(self, x): + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x + +def get_norm(norm, out_channels): + """ + Args: + norm (str or callable): either one of BN, SyncBN, FrozenBN, GN; + or a callable that takes a channel number and returns + the normalization layer as a nn.Module. + + Returns: + nn.Module or None: the normalization layer + """ + if norm is None: + return None + if isinstance(norm, str): + if len(norm) == 0: + return None + norm = { + "LN": lambda channels: LayerNorm(channels), + }[norm] + return norm(out_channels) + +class SimpleFP(nn.Module): + """ + This module implements SimpleFPN in :paper:`vitdet`. + It creates pyramid features built on top of the input feature map. + """ + + def __init__( + self, + out_channels, + scale_factors=[4.0, 2.0, 1.0, 0.5], + top_block=None, + norm="LN", + square_pad=0, + dim=1024, + stride=14, + ): + """ + Args: + out_channels (int): number of channels in the output feature maps. + scale_factors (list[float]): list of scaling factors to upsample or downsample + the input features for creating pyramid features. + top_block (nn.Module or None): if provided, an extra operation will + be performed on the output of the last (smallest resolution) + pyramid output, and the result will extend the result list. The top_block + further downsamples the feature map. It must have an attribute + "num_levels", meaning the number of extra pyramid levels added by + this block, and "in_feature", which is a string representing + its input feature (e.g., p5). + norm (str): the normalization to use. + square_pad (int): If > 0, require input images to be padded to specific square size. 
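+        Example (expected shapes with the default scale_factors [4.0, 2.0, 1.0, 0.5]; sizes follow
+        the self-test at the bottom of this file):
+
+            fpn = SimpleFP(out_channels=256, norm="LN", dim=1024, stride=14)
+            outs = fpn(torch.randn(1, 1024, 28, 28))
+            # [(1, 256, 112, 112), (1, 256, 56, 56), (1, 256, 28, 28), (1, 256, 14, 14)]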
+ """ + super(SimpleFP, self).__init__() + + self.scale_factors = scale_factors + + strides = [int(stride / scale) for scale in scale_factors] + + self.stages = [] + use_bias = norm == "" + for idx, scale in enumerate(scale_factors): + out_dim = dim + if scale == 4.0: + layers = [ + nn.ConvTranspose2d(dim, dim // 2, kernel_size=2, stride=2), + get_norm(norm, dim // 2), + nn.GELU(), + nn.ConvTranspose2d(dim // 2, dim // 4, kernel_size=2, stride=2), + ] + out_dim = dim // 4 + elif scale == 2.0: + layers = [nn.ConvTranspose2d(dim, dim // 2, kernel_size=2, stride=2)] + out_dim = dim // 2 + elif scale == 1.0: + layers = [] + elif scale == 0.5: + layers = [nn.MaxPool2d(kernel_size=2, stride=2)] + else: + raise NotImplementedError(f"scale_factor={scale} is not supported yet.") + + layers.extend( + [ + Conv2d( + out_dim, + out_channels, + kernel_size=1, + bias=use_bias, + norm=get_norm(norm, out_channels), + ), + Conv2d( + out_channels, + out_channels, + kernel_size=3, + padding=1, + bias=use_bias, + norm=get_norm(norm, out_channels), + ), + ] + ) + layers = nn.Sequential(*layers) + + stage = int(math.log2(strides[idx])) + self.add_module(f"simfp_{stage}", layers) + self.stages.append(layers) + + self.top_block = top_block + # Return feature names are "p", like ["p2", "p3", ..., "p6"] + self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in strides} + # top block output feature maps. + if self.top_block is not None: + for s in range(stage, stage + self.top_block.num_levels): + self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1) + + self._out_features = list(self._out_feature_strides.keys()) + self._out_feature_channels = {k: out_channels for k in self._out_features} + self._size_divisibility = strides[-1] + self._square_pad = square_pad + + def forward(self, x): + """ + Args: + x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``. + + Returns: + dict[str->Tensor]: + mapping from feature map name to pyramid feature map tensor + in high to low resolution order. Returned feature names follow the FPN + convention: "p", where stage has stride = 2 ** stage e.g., + ["p2", "p3", ..., "p6"]. + """ + features = x + results = [] + + for stage in self.stages: + results.append(stage(features)) + + assert len(self._out_features) == len(results) + return results + + +if __name__ == "__main__": + """ + Test the functionality of SimpleFPN (Feature Pyramid Network). + + The test uses an input tensor of shape (1, 1024, 28, 28). + """ + import torch + + # Generate a dummy input tensor of shape (batch_size=1, channels=1024, height=28, width=28) + test_input = torch.randn(1, 1024, 28, 28) + + # Instantiate the SimpleFP (assumed to be the Feature Pyramid module) + # Note: The arguments below should be checked and adapted according to SimpleFP's actual constructor. 
+ fpn = SimpleFP( + out_channels=256, # Number of output channels for FPN layers + norm="LN", # Normalization type, here using LayerNorm ("LN") + square_pad=0, # Square padding size if needed (here 0 means no padding) + dim=1024, # Number of input channels/features from the backbone + stride=14 # Stride setting, typically related to feature scaling + ) + + # ~~~~~ Model Forward Pass ~~~~~ + # Compute FPN outputs with torch.no_grad() to avoid tracking gradients (for eval/testing) + with torch.no_grad(): + output = fpn(test_input) # Expected: result is a list of feature tensors at different FPN stages + + # ~~~~~ Print Input/Output Information ~~~~~ + print("SimpleFPN Test Results:") + print(f"Input Shape: {test_input.shape}") + print("Output feature maps from each FPN stage:") + + # NOTE: If the output is a list, the features are not named (unlike dict). Adapt print info accordingly. + for idx, feature_map in enumerate(output): + print(f" Output stage {idx}: shape = {feature_map.shape}") + + # If output were a dict with feature names, iterate as below instead: + # for feature_name, feature_map in output.items(): + # print(f" {feature_name}: {feature_map.shape}") + \ No newline at end of file diff --git a/vlm_fo1/model/omchat_arch.py b/vlm_fo1/model/omchat_arch.py new file mode 100755 index 0000000000000000000000000000000000000000..a00bb4a8915a4fa953b7cfed9c4a5e9263e6817b --- /dev/null +++ b/vlm_fo1/model/omchat_arch.py @@ -0,0 +1,72 @@ +from abc import ABC, abstractmethod + +from vlm_fo1.model.multimodal_encoder.builder import build_vision_tower, build_vision_tower_aux +from vlm_fo1.model.multimodal_projector.builder import build_vision_projector, build_vision_projector_aux +from vlm_fo1.model.multimodal_visual_prompt_encoder.hybrid_finegrained_region_encoder import HFREModule + +class OmChatMetaModel: + def __init__(self, config): + super(OmChatMetaModel, self).__init__(config) + # print('----------------------delay_load:', config.delay_load) + if getattr(config, "mm_vision_tower", None) is not None: + self.vision_tower = build_vision_tower(config, delay_load=getattr(config, 'delay_load', True)) + if getattr(config, "mm_vision_tower", None) is not None: + self.mm_projector = build_vision_projector(config) + if getattr(config, "mm_vision_tower_aux", None) is not None: + self.vision_tower_aux = build_vision_tower_aux(config, delay_load=getattr(config, 'delay_load', True)) + self.object_vp_extractor = HFREModule( + roi_output_size=getattr(config, "mm_roi_output_size", 7), + region_feature_dim=config.mm_region_hidden_size, + apply_position_embedding=getattr(config, "mm_apply_position_embedding", True), + pos_embedding_strategy=getattr(config, "mm_pos_embedding_strategy", "bbox_based"), + use_vt_region_feature_only=getattr(config, "mm_use_vt_region_feature_only", False), + use_vision_tower_region_feature=getattr(config, "mm_use_vision_tower_region_feature", False), + region_feature_combination=getattr(config, "mm_region_feature_combination", "concat"), + apply_region_layer_norm=getattr(config, "mm_apply_region_layer_norm", False), + vision_tower_region_feature_dim=self.get_vision_tower().config.hidden_size * 4 if not getattr(config, "mm_use_simpleFPN_for_vt", False) else 2048, + vision_tower_spatial_scale=1/self.get_vision_tower().config.patch_size, + use_simpleFPN_for_vt=getattr(config, "mm_use_simpleFPN_for_vt", False), + aux_vision_tower_spatial_scale=0.25, + aux_vision_tower_region_feature_dims=[256, 512, 1024, 2048], + ) + if getattr(config, "mm_vision_tower_aux", None) is not None: + 
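+            # Descriptive note on the region-encoder settings above (the patch size value is an
+            # assumption for a ViT-L/14-style tower, not read from this repo's configs):
+            # `vision_tower_spatial_scale=1/patch_size` is the usual ROIAlign-style factor that maps
+            # box coordinates from input-image pixels onto the ViT feature grid (about 0.071 for a
+            # patch size of 14), while the auxiliary tower uses a fixed 0.25 scale and pyramid
+            # channel dims [256, 512, 1024, 2048]. When an auxiliary tower is configured, a
+            # dedicated projector for its features is also built below.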
+            self.mm_projector_aux = build_vision_projector_aux(config)
+
+    def get_vision_tower(self):
+        vision_tower = getattr(self, 'vision_tower', None)
+        if type(vision_tower) is list:
+            vision_tower = vision_tower[0]
+        return vision_tower
+
+    def get_vision_tower_aux(self):
+        vision_tower_aux = getattr(self, 'vision_tower_aux', None)
+        if type(vision_tower_aux) is list:
+            vision_tower_aux = vision_tower_aux[0]
+        return vision_tower_aux
+
+    def get_video_tower(self):
+        video_tower = getattr(self, 'video_tower', None)
+        if type(video_tower) is list:
+            video_tower = video_tower[0]
+        return video_tower
+
+
+class OmChatMetaForCausalLM(ABC):
+
+    @abstractmethod
+    def get_model(self):
+        pass
+
+    def get_vision_tower(self):
+        return self.get_model().get_vision_tower()
+
+    def get_vision_tower_aux(self):
+        return self.get_model().get_vision_tower_aux()
+
+    def get_video_tower(self):
+        # NOTE: OmChatMetaModel.__init__ builds no separate video tower, so this
+        # falls back to the vision tower.
+        return self.get_model().get_vision_tower()
+
+    def encode_videos(self, videos):  # [mini_b, c, t, h, w]
+        # NOTE: this calls OmChatMetaModel.get_video_tower(), which returns None unless a
+        # `video_tower` attribute is attached to the model elsewhere.
+        video_features = self.get_model().get_video_tower()(videos)  # [mini_b, t, n, c]
+        video_features = self.get_model().mm_projector.forward_video(video_features)
+        return video_features
\ No newline at end of file
diff --git a/vlm_fo1/task_templates.py b/vlm_fo1/task_templates.py
new file mode 100644
index 0000000000000000000000000000000000000000..30dd45aad6986926c1ed5de97de46a2a344c5820
--- /dev/null
+++ b/vlm_fo1/task_templates.py
@@ -0,0 +1,19 @@
+OD_template = "Please detect {} in this image. Answer the question with object indexes."
+
+OD_Counting_template = "How many {} are there in this image? Count each instance of the target object. Locate them with object indexes and then answer the question with the number of objects."
+
+REC_template = "Please detect {} in this image. Answer the question with object indexes."
+
+Region_OCR_template = "Please provide the ocr results of {} in the image."
+
+Brief_Region_Caption_template = "Provide a brief description for {}."
+
+Detailed_Region_Caption_template = "Provide a detailed description for {}."
+
+Grounding_template = "Briefly describe this image and detect all mentioned objects. Answer with grounded object indexes."
+
+Visual_Prompt_OD_template = "Using the provided object {} as a reference, identify all other objects of the same category in this image. Respond with object indexes."
+
+Viusal_Region_Reasoning_template = "First think about the reasoning process in the mind and then provide the user with the answer. The reasoning process and answer are enclosed within <think> </think> and <answer> </answer> tags, respectively, i.e., <think> reasoning process here </think> <answer> answer here </answer>. Please give a detailed reasoning process and provide image regions that can help you answer the question better. {}"
+
+OD_All_template = "Please identify every object in the image and provide their labels along with their indexes."
\ No newline at end of file
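
For reference, a minimal usage sketch of these templates (not part of the diff above; it assumes the `vlm_fo1` package from this repo is importable, and the category string and question are example values):

```python
# Minimal sketch: turning the task templates into prompts.
from vlm_fo1.task_templates import (
    OD_template,
    OD_Counting_template,
    Viusal_Region_Reasoning_template,
)

od_prompt = OD_template.format("orange, apple")
# -> "Please detect orange, apple in this image. Answer the question with object indexes."

count_prompt = OD_Counting_template.format("airplane with only one propeller")

reasoning_prompt = Viusal_Region_Reasoning_template.format("What's the brand of this computer?")
# The reasoning template asks the model to wrap its output in <think> ... </think> and
# <answer> ... </answer> blocks, which downstream code can split apart before mapping
# any referenced object indexes back to candidate boxes.
```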