@inproceedings{harrerfranke2023inovis,
  title     = {Inovis: Instant Novel-View Synthesis},
  author    = {Harrer, Mathias and Franke, Linus and Fink, Laura and Stamminger, Marc and Weyrich, Tim},
  booktitle = {SIGGRAPH Asia 2023 Conference Papers},
  year      = {2023},
  month     = dec,
  day       = {12},
  numpages  = {12},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  location  = {Sydney, NSW, Australia},
  date      = {December 12-15, 2023},
  doi       = {10.1145/3610548.3618216},
  url       = {https://doi.org/10.1145/3610548.3618216},
  abstract  = {Novel-view synthesis is an ill-posed problem in that it requires inference of previously unseen information. Recently, reviving the traditional field of image-based rendering, neural methods proved particularly suitable for this interpolation/extrapolation task; however, they often require a-priori scene-completeness or costly preprocessing steps and generally suffer from long (scene-specific) training times. Our work draws from recent progress in neural spatio-temporal supersampling to enhance a state-of-the-art neural renderer's ability to infer novel-view information at inference time. We adapt a supersampling architecture [Xiao et al. 2020], which resamples previously rendered frames, to instead recombine nearby camera images in a multi-view dataset. These input frames are warped into a joint target frame, guided by the most recent (point-based) scene representation, followed by neural interpolation. The resulting architecture gains sufficient robustness to significantly improve transferability to previously unseen datasets. In particular, this enables novel applications for neural rendering where dynamically streamed content is directly incorporated in a (neural) image-based reconstruction of a scene. As we will show, our method reaches state-of-the-art performance when compared to previous works that rely on static and sufficiently densely sampled scenes; in addition, we demonstrate our system's particular suitability for dynamically streamed content, where our approach is able to produce high-fidelity novel-view synthesis even with significantly fewer available frames than competing neural methods.},
  keywords  = {Novel-View Synthesis, Point-based Graphics, Neural Rendering, Image-based Rendering, Reconstruction},
  authorurl = {https://reality.tf.fau.de/pub/harrerfranke2023inovis.html}
}
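
The abstract above outlines a two-stage pipeline: nearby source frames are forward-warped into a joint target view, guided by the current point-based scene representation, and a network then interpolates the warped stack. The sketch below illustrates only the warping stage, under simplifying assumptions (pinhole cameras, a per-pixel depth map already rendered from the point cloud, no occlusion handling). All function and variable names here are hypothetical; this is an illustration of depth-guided forward warping in general, not the authors' implementation.

# Hypothetical sketch of the warping step described in the abstract:
# reproject a source frame into the target view using depth obtained
# from the point-based scene representation. The neural interpolation
# stage that follows in the paper's pipeline is not shown.
import numpy as np

def warp_to_target(src_rgb, src_depth, K_src, T_src, K_tgt, T_tgt, out_hw):
    """Forward-warp src_rgb into the target camera.

    src_rgb   : (H, W, 3) source image
    src_depth : (H, W) depth per source pixel (e.g. splatted point cloud)
    K_*       : (3, 3) intrinsics; T_* : (4, 4) world-to-camera extrinsics
    out_hw    : (H_out, W_out) resolution of the target frame
    """
    H, W = src_depth.shape
    u, v = np.meshgrid(np.arange(W), np.arange(H))
    # Back-project source pixels to camera space, then to world space.
    pix = np.stack([u, v, np.ones_like(u)], axis=-1).reshape(-1, 3).T  # (3, N)
    cam = np.linalg.inv(K_src) @ pix * src_depth.reshape(1, -1)
    world = np.linalg.inv(T_src) @ np.vstack([cam, np.ones((1, cam.shape[1]))])
    # Project world points into the target camera.
    cam_t = (T_tgt @ world)[:3]
    z = cam_t[2]
    z_safe = np.where(z > 0, z, 1.0)  # avoid division by zero; filtered below
    proj = K_tgt @ cam_t
    ut = np.round(proj[0] / z_safe).astype(int)
    vt = np.round(proj[1] / z_safe).astype(int)
    H_out, W_out = out_hw
    valid = (z > 0) & (ut >= 0) & (ut < W_out) & (vt >= 0) & (vt < H_out)
    out = np.zeros((H_out, W_out, 3), dtype=src_rgb.dtype)
    colors = src_rgb.reshape(-1, 3)
    # Naive splat (last write wins); a real renderer would z-buffer here.
    out[vt[valid], ut[valid]] = colors[valid]
    return out

In a full system along the lines the abstract describes, several such warped frames (plus auxiliary features such as depth and validity masks) would be stacked and fed to the neural interpolation network, which resolves occlusions and fills disocclusion holes.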