Datasets:

Modalities:
Image
Languages:
English
ArXiv:
Tags:
code
License:
File size: 3,581 Bytes
760e6b0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0ff6559
 
 
b6de6a7
0ff6559
 
 
 
 
b6de6a7
 
0ff6559
760e6b0
 
 
 
92f75f0
760e6b0
0ff6559
 
 
760e6b0
0ff6559
 
 
760e6b0
0ff6559
 
 
760e6b0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
affbc94
760e6b0
9f9e358
affbc94
760e6b0
9f9e358
760e6b0
 
 
 
e5a84b8
 
760e6b0
 
 
 
 
e5a84b8
760e6b0
 
e5a84b8
760e6b0
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
import json
import os
import datasets

# BibTeX citation surfaced to dataset users via DatasetInfo.
# NOTE(review): placeholder entry — the "@InProceedings{...}," line is not
# valid BibTeX (missing citation key, stray trailing comma); fill in the
# real citation before release.
_CITATION = """\
@InProceedings{...},
title = {Your Dataset Title},
author={Your Name},
year={2025}
}
"""

# Human-readable summary shown on the dataset card / DatasetInfo.
_DESCRIPTION = """\
Dataset containing multi-view images with camera poses, depth maps, and masks for NeRF training.
"""

# License identifier reported in DatasetInfo.
_LICENSE = "MIT"

class RefRefConfig(datasets.BuilderConfig):
    """Configuration for the RefRef dataset builder.

    Attributes:
        scene: Optional scene identifier; used downstream to select the
            scene sub-folder within the chosen configuration.
    """

    def __init__(self, scene=None, **kwargs):
        """Initialise a RefRef configuration.

        Args:
            scene: Scene identifier appended to the remote data path
                (default ``None``).
            **kwargs: Forwarded verbatim to ``datasets.BuilderConfig``
                (e.g. ``name``, ``description``).
        """
        super().__init__(**kwargs)
        # Remember which scene this configuration targets.
        self.scene = scene

class RefRef(datasets.GeneratorBasedBuilder):
    """A dataset loader for NeRF-style data with camera poses, depth maps, and masks."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIG_CLASS = RefRefConfig
    BUILDER_CONFIGS = [
        RefRefConfig(
            name="single_non_convex",
            description="Single non-convex scene configuration for RefRef dataset.",
        ),
        RefRefConfig(
            name="multiple_non_convex",
            description="Multiple non-convex scene configuration for RefRef dataset.",
        ),
        RefRefConfig(
            name="single_convex",
            description="Single convex scene configuration for RefRef dataset.",
        ),
    ]

    # Base URL under which per-category/per-config/per-scene folders live.
    _BASE_URL = "https://huggingface.co/datasets/yinyue27/RefRef/resolve/main/image_data"

    def _info(self):
        """Return the DatasetInfo describing this dataset's feature schema."""
        features = datasets.Features({
            "image": datasets.Image(),
            "depth": datasets.Image(),
            "mask": datasets.Image(),
            # Camera-to-world transform stored as a 4x4 matrix of float64.
            "transform_matrix": datasets.Sequence(
                datasets.Sequence(datasets.Value("float64"), length=4),
                length=4
            ),
            "rotation": datasets.Value("float32")
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage="",
            license=_LICENSE,
            citation=_CITATION
        )

    def _split_generators(self, dl_manager):
        """Create one split per scene category.

        Each split's ``filepaths`` is the remote directory expected to
        contain transforms_{train,val,test}.json for the configured scene.
        """
        # NOTE(review): these are https URLs, not local paths, and
        # _generate_examples opens them with open(); this only works when
        # the files are materialised locally (e.g. via streaming or a prior
        # download). Consider dl_manager.download() here — confirm against
        # how this script is invoked. Structure kept as-is.
        return [
            datasets.SplitGenerator(
                name=category,
                gen_kwargs={
                    "filepaths": f"{self._BASE_URL}/{category}/{self.config.name}/{self.config.scene}",
                    "split": category,
                },
            )
            for category in ["textured_sphere_scene", "textured_cube_scene"]
        ]

    def _generate_examples(self, filepaths, split):
        """Yield (key, example) pairs for every frame of every sub-split.

        Args:
            filepaths: directory containing transforms_{train,val,test}.json.
            split: category name from _split_generators; kept for the
                datasets API but not used to select files — all three
                sub-splits are always read.
        """
        # Use a distinct loop variable: the original shadowed the `split`
        # parameter, silently discarding the caller's value.
        for subsplit in ("train", "val", "test"):
            transforms_path = os.path.join(filepaths, f"transforms_{subsplit}.json")
            try:
                with open(transforms_path, "r", encoding="utf-8") as f:
                    data = json.load(f)
            except (OSError, json.JSONDecodeError):
                # Skip missing or malformed transform files. The original
                # printed "error" and fell through, raising NameError on
                # the undefined `data` below.
                continue

            # Loop-invariant: compute once per file, not once per frame.
            base_dir = os.path.dirname(transforms_path)
            scene_name = os.path.basename(base_dir)

            for frame_idx, frame in enumerate(data.get("frames", [])):
                # Include the sub-split in the key: frame indices repeat
                # across train/val/test, and duplicate keys are an error
                # in GeneratorBasedBuilder.
                yield f"{scene_name}_{subsplit}_{frame_idx}", {
                    "image": os.path.join(base_dir, frame["file_path"] + ".png"),
                    "depth": os.path.join(base_dir, frame["depth_file_path"]),
                    "mask": os.path.join(base_dir, frame["mask_file_path"]),
                    "transform_matrix": frame["transform_matrix"],
                    "rotation": frame.get("rotation", 0.0),
                }