Hongchi Xia1,
Chih-Hao Lin1,
Hao-Yu Hsu1,
Quentin Leboutet2,
Katelyn Gao2,
Michael Paulitsch2,
Benjamin Ummenhofer2,
Shenlong Wang1
1University of Illinois at Urbana-Champaign 2Intel
HoloScene leverages a comprehensive interactive scene-graph representation, encoding object geometry, appearance, and physical properties alongside hierarchical and inter-object relationships. Reconstruction is formulated as an energy-based optimization problem, integrating observational data, physical constraints, and generative priors into a unified, coherent objective. The resulting digital twins exhibit complete and precise geometry, physical stability, and realistic rendering from novel viewpoints.
- [2025/10/08] Code is released. For more information, please visit our project page!
# Create and activate a dedicated Python 3.10 environment for HoloScene.
conda create -n holoscene -y python=3.10
conda activate holoscene
# PyTorch 2.5.1 pinned to the CUDA 11.8 wheel index; torchvision/torchaudio
# versions must match this torch build.
pip install torch==2.5.1+cu118 torchvision==0.20.1+cu118 torchaudio==2.5.1+cu118 --index-url https://download.pytorch.org/whl/cu118
pip install -r requirements.txt
# torch-scatter wheels are built per torch/CUDA combination, hence the
# torch-2.5.1+cu118 wheel index below.
pip install torch-scatter -f https://data.pyg.org/whl/torch-2.5.1+cu118.html
# The next three packages are pinned to exact upstream commits for
# reproducibility (gsplat rasterizer, nvdiffrast, PyTorch3D).
pip install git+https://github.com/nerfstudio-project/gsplat.git@24abe714105441f049b50be1fc8eb411d727e6e6
pip install git+https://github.com/NVlabs/nvdiffrast.git@729261dc64c4241ea36efda84fbf532cc8b425b8#egg=nvdiffrast
pip install git+https://github.com/facebookresearch/pytorch3d.git@75ebeeaea0908c5527e7b1e305fbc7681382db47
# tiny-cuda-nn's torch bindings live in a subdirectory of the repo.
pip install git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch
# Isaac Sim 4.2 (physics simulation) from NVIDIA's pip index; the extension
# cache packages must match the isaacsim version exactly.
pip install isaacsim==4.2.0.2 --extra-index-url https://pypi.nvidia.com
pip install isaacsim-extscache-physics==4.2.0.2 isaacsim-extscache-kit==4.2.0.2 isaacsim-extscache-kit-sdk==4.2.0.2 --extra-index-url https://pypi.nvidia.com
# Download Wonder3D+ checkpoints
# Fetches the full 'flamehaze1115/Wonder3D_plus' Hugging Face repo snapshot
# into ./ckpts (requires the huggingface_hub package and network access).
python -c "
from huggingface_hub import snapshot_download
snapshot_download(repo_id='flamehaze1115/Wonder3D_plus', local_dir='./ckpts')
"
# Download LaMa model weights into ./lama.
# Guard the cd: without it, a missing ./lama directory would silently cause the
# archive to be downloaded and unpacked in the current directory instead.
cd lama || exit 1
# -L follows redirects, -J uses the server-suggested filename, -O saves to disk.
curl -LJO https://huggingface.co/smartywu/big-lama/resolve/main/big-lama.zip
# -o overwrites without prompting so the step is idempotent on re-runs.
unzip -o big-lama.zip
cd ..
# Download Omnidata normal estimation model
# Saves omnidata_dpt_normal_v2.ckpt from the Zenodo record into the current
# directory (filename taken from the URL).
wget "https://zenodo.org/records/10447888/files/omnidata_dpt_normal_v2.ckpt"
# Set environment variables (if export gaussian splat usd needed)
export CPATH=$CPATH:./tiny-cuda-nn-include:$CONDA_PREFIX/include

Please download the preprocessed data by following ./scripts/data_download.sh and unzip it into the data_dir folder. The resulting folder structure should be:
└── HoloScene
└── data_dir
├── replica
├── room_0
├── images
├── transforms.json
├── room_1
├── room_2
├── scannetpp
├── acd69a1746
├── 67d702f2e8
├── 7831862f02
├── gibson
├── Beechwood_0_int
├── Beechwood_1_int
├── custom
├── siebelgame
conda activate holoscene
# Stage 0: Generate depth and normal priors
# Both passes run Marigold (E2E fine-tuned checkpoints) over the scene's RGB
# images and write the predictions next to them under ${data_dir}/.
data_dir="data_dir/replica/room_0"
# Surface-normal priors.
python marigold/run.py \
    --checkpoint="GonzaloMG/marigold-e2e-ft-normals" \
    --modality normals \
    --input_rgb_dir="${data_dir}/images" \
    --output_dir="${data_dir}/"
# Monocular depth priors.
python marigold/run.py \
    --checkpoint="GonzaloMG/marigold-e2e-ft-depth" \
    --modality depth \
    --input_rgb_dir="${data_dir}/images" \
    --output_dir="${data_dir}/"
# Stage 1: Initial reconstruction
python training/exp_runner.py --conf confs/replica/room_0/replica_room_0.conf
# Stage 2: Post-processing
# --is_continue with --timestamp latest / --checkpoint 1000 appears to resume
# from the previous stage's saved checkpoint — NOTE(review): confirm the
# checkpoint-id semantics against exp_runner_post.py.
python training/exp_runner_post.py --conf confs/replica/room_0/replica_room_0_post.conf \
    --is_continue \
    --timestamp latest \
    --checkpoint 1000
# Stage 3: Texture refinement
python training/exp_runner_texture.py --conf confs/replica/room_0/replica_room_0_tex.conf \
    --is_continue \
    --timestamp latest \
    --checkpoint 1000
# Stage 4: Gaussian on mesh
python training/exp_runner_gaussian_on_mesh.py --conf confs/replica/room_0/replica_room_0_tex.conf \
    --is_continue \
    --timestamp latest \
    --checkpoint 1000
# Export results
# GLB (mesh), USD (scene), and Gaussian-splat USD exports, all driven by the
# same texture-stage config.
python export/export_glb.py --conf confs/replica/room_0/replica_room_0_tex.conf \
    --timestamp latest
python export/export_usd.py --conf confs/replica/room_0/replica_room_0_tex.conf \
    --timestamp latest
python export/export_gs_usd.py --conf confs/replica/room_0/replica_room_0_tex.conf \
    --timestamp latest

We thank the authors of:
for their foundational work and open-source contributions.
@misc{xia2025holoscene,
title={HoloScene: Simulation-Ready Interactive 3D Worlds from a Single Video},
author={Hongchi Xia and Chih-Hao Lin and Hao-Yu Hsu and Quentin Leboutet and Katelyn Gao and Michael Paulitsch and Benjamin Ummenhofer and Shenlong Wang},
year={2025},
eprint={2510.05560},
archivePrefix={arXiv},
primaryClass={cs.CV},
url={https://arxiv.org/abs/2510.05560},
}