{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,6]],"date-time":"2026-03-06T18:56:43Z","timestamp":1772823403982,"version":"3.50.1"},"publisher-location":"Cham","reference-count":54,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031733895","type":"print"},{"value":"9783031733901","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,10,31]],"date-time":"2024-10-31T00:00:00Z","timestamp":1730332800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,10,31]],"date-time":"2024-10-31T00:00:00Z","timestamp":1730332800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-73390-1_18","type":"book-chapter","created":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T16:24:01Z","timestamp":1730305441000},"page":"301-318","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":7,"title":["Self-adapting Large Visual-Language Models to\u00a0Edge Devices Across Visual Modalities"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-7988-837X","authenticated-orcid":false,"given":"Kaiwen","family":"Cai","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0283-8419","authenticated-orcid":false,"given":"Zhekai","family":"Duan","sequence":"additional","affiliation":[]},{"given":"Gaowen","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Charles","family":"Fleming","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3733-4480","authenticated-orcid":false,"given":"Chris 
Xiaoxuan","family":"Lu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,10,31]]},"reference":[{"key":"18_CR1","unstructured":"Achiam, J., et al: GPT-4 technical report. arXiv arXiv:2303.08774 (2023)"},{"key":"18_CR2","doi-asserted-by":"crossref","unstructured":"Cai, Y., Yao, Z., Dong, Z., Gholami, A., Mahoney, M.W., Keutzer, K.: ZeroQ: a novel zero shot quantization framework. In: Computer Vision and Pattern Recognition (CVPR), pp. 13166\u201313175. IEEE (2020)","DOI":"10.1109\/CVPR42600.2020.01318"},{"key":"18_CR3","doi-asserted-by":"crossref","unstructured":"Chen, R., et al.: CLIP2Scene: towards Label-efficient 3D Scene Understanding by CLIP. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023)","DOI":"10.1109\/CVPR52729.2023.00678"},{"key":"18_CR4","unstructured":"Chen, T., Kornblith, S., Norouzi, M., Hinton, G.: A simple framework for contrastive learning of visual representations. In: International Conference on Machine Learning, pp. 1597\u20131607. PMLR (2020)"},{"key":"18_CR5","unstructured":"Choi, J., Wang, Z., Venkataramani, S., Chuang, P.I.J., Srinivasan, V., Gopalakrishnan, K.: PACT: parameterized clipping activation for quantized neural networks. openreview.net arXiv:1805.06085 (2018)"},{"key":"18_CR6","doi-asserted-by":"crossref","unstructured":"Dai, A., Chang, A.X., Savva, M., Halber, M., Funkhouser, T., Nie\u00dfner, M.: ScanNet: richly-annotated 3D reconstructions of indoor scenes. arXiv arXiv:1702.04405 (2017)","DOI":"10.1109\/CVPR.2017.261"},{"key":"18_CR7","doi-asserted-by":"crossref","unstructured":"Dai, X., et al.: General instance distillation for object detection. In: Computer Vision and Pattern Recognition (CVPR), pp. 7842\u20137851 (2021)","DOI":"10.1109\/CVPR46437.2021.00775"},{"key":"18_CR8","unstructured":"Dettmers, T., Lewis, M., Zettlemoyer, L.: Gpt3.int8(): 8-bit matrix multiplication for transformers at scale. 
In: Conference on Neural Information Processing Systems (NeurIPS) (2022)"},{"key":"18_CR9","doi-asserted-by":"crossref","unstructured":"Ding, R., Yang, J., Xue, C., Zhang, W., Bai, S., QI, X.: PLA: language-driven open-vocabulary 3D scene understanding. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023)","DOI":"10.1109\/CVPR52729.2023.00677"},{"key":"18_CR10","unstructured":"Dosovitskiy, A., et al.: An image is worth 16$$\\,\\times \\,$$16 words :transformers for image recognition at scale. arXiv:2010.11929 (2020)"},{"key":"18_CR11","doi-asserted-by":"crossref","unstructured":"Du, Y., Wei, F., Zhang, Z., Shi, M., Gao, Y., Li, G.: Learning to prompt for open-vocabulary object detection with vision-language model. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 14084\u201314093 (2022)","DOI":"10.1109\/CVPR52688.2022.01369"},{"key":"18_CR12","unstructured":"Esser, S.K., McKinstry, J.L., Bablani, D., Appuswamy, R., Modha, D.S.: Learned step size quantization. arXiv preprint arXiv:1902.08153 (2020)"},{"key":"18_CR13","doi-asserted-by":"crossref","unstructured":"Fang, R., Pang, G., Bai, X.: Simple image-level classification improves open-vocabulary object detection. arXiv arXiv:2312.10439 (2023)","DOI":"10.1609\/aaai.v38i2.27939"},{"key":"18_CR14","doi-asserted-by":"crossref","unstructured":"Fischedick, S.B., Seichter, D., Schmidt, R., Rabes, L., Gross, H.M.: Efficient multi-task scene analysis with RGB-D transformers. In: IEEE International Joint Conference on Neural Network (IJCNN), pp. 1\u201310 (2023)","DOI":"10.1109\/IJCNN54540.2023.10191977"},{"key":"18_CR15","doi-asserted-by":"crossref","unstructured":"Ghiasi, G., Gu, X., Cui, Y., Lin, T.Y.: Scaling open-vocabulary image segmentation with image-level labels. 
In: European Conference on Computer Vision (ECCV) (2022)","DOI":"10.1007\/978-3-031-20059-5_31"},{"key":"18_CR16","unstructured":"Gu, X., Lin, T.Y., Kuo, W., Cui, Y.: Open-vocabulary object detection via vision and language knowledge distillation. In: International Conference on Learning Representations (ICLR) (2022)"},{"key":"18_CR17","doi-asserted-by":"publisher","first-page":"103352","DOI":"10.1016\/j.cviu.2021.103352","volume":"216","author":"FM Hafner","year":"2022","unstructured":"Hafner, F.M., Bhuyian, A., Kooij, J.F., Granger, E.: Cross-modal distillation for RGB-depth person re-identification. Comput. Vis. Image Underst. 216, 103352 (2022)","journal-title":"Comput. Vis. Image Underst."},{"key":"18_CR18","unstructured":"Han, J., Pei, J., Tong, H.: Data Mining: Concepts and Techniques. Morgan Kaufmann (2022)"},{"key":"18_CR19","doi-asserted-by":"crossref","unstructured":"He, S., Guo, T., Dai, T., Qiao, R., Ren, B., Xia, S.: Open-vocabulary multi-label classification via multi-modal knowledge transfer. In: Proceedings of the AAAI Conference on Artificial Intelligence, no. 1, pp. 808\u2013816 (2022)","DOI":"10.1609\/aaai.v37i1.25159"},{"key":"18_CR20","doi-asserted-by":"crossref","unstructured":"He, W., Jamonnak, S., Gou, L., Ren, L.: Clip-S$$^4$$: language-guided self-supervised semantic segmentation. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023)","DOI":"10.1109\/CVPR52729.2023.01078"},{"issue":"7","key":"18_CR21","doi-asserted-by":"publisher","first-page":"2217","DOI":"10.1109\/JSTARS.2019.2918242","volume":"12","author":"P Helber","year":"2018","unstructured":"Helber, P., Bischke, B., Dengel, A., Borth, D.: EuroSAT: a novel dataset and deep learning benchmark for land use and land cover classification. IEEE J. Sel. Top. Appl. Earth Observations Remote Sens. 12(7), 2217\u20132226 (2018)","journal-title":"IEEE J. Sel. Top. Appl. 
Earth Observations Remote Sens."},{"key":"18_CR22","unstructured":"Hinton, G., Vinyals, O., Dean, J.: Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531 (2015)"},{"key":"18_CR23","doi-asserted-by":"crossref","unstructured":"Hoffman, J., Gupta, S., Leong, J., Guadarrama, S., Darrell, T.: Cross-modal adaptation for RGB-D detection. In: 2016 IEEE International Conference on Robotics and Automation (ICRA), pp. 5032\u20135039. IEEE (2016)","DOI":"10.1109\/ICRA.2016.7487708"},{"key":"18_CR24","doi-asserted-by":"crossref","unstructured":"Hong, Y., Dai, H., Ding, Y.: Cross-modality knowledge distillation network for monocular 3D object detection. In: European Conference on Computer Vision (ECCV) (2022)","DOI":"10.1007\/978-3-031-20080-9_6"},{"key":"18_CR25","doi-asserted-by":"crossref","unstructured":"Huynh, D., Kuen, J., Lin, Z., Gu, J., Elhamifar, E.: Open-vocabulary instance segmentation via robust cross-modal pseudo-labeling. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 7020\u20137031 (2022)","DOI":"10.1109\/CVPR52688.2022.00689"},{"key":"18_CR26","doi-asserted-by":"publisher","unstructured":"Ilharco, G., et al.: Openclip (2021). https:\/\/doi.org\/10.5281\/zenodo.5143773","DOI":"10.5281\/zenodo.5143773"},{"key":"18_CR27","doi-asserted-by":"crossref","unstructured":"Jacob, B., et al.: Quantization and training of neural networks for efficient integer-arithmetic-only inference. In: 2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2704\u20132713. IEEE (2018)","DOI":"10.1109\/CVPR.2018.00286"},{"key":"18_CR28","unstructured":"Jia, C., et al.: Scaling up visual and vision-language representation learning with noisy text supervision. In: International Conference on Machine Learning (ICML), pp. 4904\u20134916 (2021)"},{"key":"18_CR29","unstructured":"Krishnamoorthi, R.: Quantizing deep convolutional networks for efficient inference: a whitepaper. 
arXiv arXiv:1806.08342 (2018)"},{"key":"18_CR30","unstructured":"Kuo, W., Cui, Y., Gu, X., Piergiovanni, A., Angelova, A.: Open-vocabulary object detection upon frozen vision and language models. ICLR 2023 poster arXiv:abs\/2209.15639 (2023)"},{"key":"18_CR31","doi-asserted-by":"crossref","unstructured":"Lee, J., Kim, D., Ham, B.: Network quantization with element-wise gradient scaling. In: 2021 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 6444\u20136453. IEEE (2021)","DOI":"10.1109\/CVPR46437.2021.00638"},{"key":"18_CR32","unstructured":"Li, Y., Xu, S., Zhang, B., Cao, X., Gao, P., Guo, G.: Q-ViT: accurate and fully quantized low-bit vision transformer. In: Conference on Neural Information Processing Systems (NeurIPS), vol. 35, pp. 34451\u201334463 (2022)"},{"key":"18_CR33","doi-asserted-by":"crossref","unstructured":"Li, Y., Adamczewski, K., Li, W., Gu, S., Timofte, R., Gool, L.: Revisiting random channel pruning for neural network compression. In: Computer Vision and Pattern Recognition (CVPR), pp. 191\u2013201. IEEE (2022)","DOI":"10.1109\/CVPR52688.2022.00029"},{"key":"18_CR34","doi-asserted-by":"crossref","unstructured":"Liu, J., Niu, L., Yuan, Z., Yang, D., Wang, X., Liu, W.: PD-Quant: post-training quantization based on prediction difference metric. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023)","DOI":"10.1109\/CVPR52729.2023.02340"},{"key":"18_CR35","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. arXiv arXiv:2103.14030 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"18_CR36","doi-asserted-by":"crossref","unstructured":"Minderer, M., et al.: Simple open-vocabulary object detection. 
arXiv:2205.06230 (2022)","DOI":"10.1007\/978-3-031-20080-9_42"},{"key":"18_CR37","doi-asserted-by":"crossref","unstructured":"Nagel, M., Baalen, M.v., Blankevoort, T., Welling, M.: Data-free quantization through weight equalization and bias correction. In: IEEE International Conference on Computer Vision (ICCV), pp. 1325\u20131334 (2019)","DOI":"10.1109\/ICCV.2019.00141"},{"key":"18_CR38","doi-asserted-by":"crossref","unstructured":"Peng, S., Genova, K., Jiang, C., Tagliasacchi, A., Pollefeys, M., Funkhouser, T.: Openscene: 3D scene understanding with open vocabularies. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023)","DOI":"10.1109\/CVPR52729.2023.00085"},{"key":"18_CR39","unstructured":"Qin, J., et al.: FreeSeg: unified, universal and open-vocabulary image segmentation. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023)"},{"key":"18_CR40","unstructured":"Radford, A., et al.: [CLIP] Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning (ICML), pp. 8748\u20138763 (2021)"},{"key":"18_CR41","unstructured":"Romero, A., Ballas, N., Kahou, S.E., Chassang, A., Gatta, C., Bengio, Y.: FitNets: hints for thin deep nets. In: International Conference on Learning Representations (ICLR) (2015)"},{"key":"18_CR42","doi-asserted-by":"crossref","unstructured":"Schroff, F., Kalenichenko, D., Philbin, J.: FaceNet: a unified embedding for face recognition and clustering. In: 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)","DOI":"10.1109\/CVPR.2015.7298682"},{"key":"18_CR43","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"746","DOI":"10.1007\/978-3-642-33715-4_54","volume-title":"Computer Vision \u2013 ECCV 2012","author":"N Silberman","year":"2012","unstructured":"Silberman, N., Hoiem, D., Kohli, P., Fergus, R.: Indoor segmentation and support inference from RGBD images. 
In: Fitzgibbon, A., Lazebnik, S., Perona, P., Sato, Y., Schmid, C. (eds.) ECCV 2012. LNCS, vol. 7576, pp. 746\u2013760. Springer, Heidelberg (2012). https:\/\/doi.org\/10.1007\/978-3-642-33715-4_54"},{"key":"18_CR44","doi-asserted-by":"crossref","unstructured":"Song, S., Lichtenberg, S.P., Xiao, J.: SUN RGB-D: a RGB-D scene understanding benchmark suite. In: 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 567\u2013576 (2015)","DOI":"10.1109\/CVPR.2015.7298655"},{"key":"18_CR45","doi-asserted-by":"crossref","unstructured":"Su, J.C., Maji, S.: Adapting models to signal degradation using distillation. In: British Machine Vision Conference (BMVC) (2017)","DOI":"10.5244\/C.31.21"},{"key":"18_CR46","doi-asserted-by":"crossref","unstructured":"Thoker, F.M., Gall, J.: Cross-modal knowledge distillation for action recognition. In: 2019 IEEE International Conference on Image Processing (ICIP), pp. 6\u201310. IEEE (2019)","DOI":"10.1109\/ICIP.2019.8802909"},{"key":"18_CR47","unstructured":"Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., Jegou, H.: Training data-efficient image transformers & distillation through attention. In: Proceedings of the 38th International Conference on Machine Learning, pp. 10347\u201310357. PMLR (2021)"},{"key":"18_CR48","unstructured":"Wu, H., Judd, P., Zhang, X., Isaev, M., Micikevicius, P.: Integer quantization for deep learning inference: principles and empirical evaluation. arXiv arXiv:2004.09602 (2020)"},{"key":"18_CR49","doi-asserted-by":"crossref","unstructured":"Xia, Z., Pan, X., Song, S., Li, L.E., Huang, G.: Vision transformer with deformable attention. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 4794\u20134803 (2022)","DOI":"10.1109\/CVPR52688.2022.00475"},{"key":"18_CR50","unstructured":"Xiao, G., Lin, J., Seznec, M., Wu, H., Demouth, J., Han, S.: SmoothQuant: accurate and efficient post-training quantization for large language models. 
In: International Conference on Machine Learning (ICML), pp. 38087\u201338099 (2023)"},{"key":"18_CR51","doi-asserted-by":"crossref","unstructured":"Xue, L., et al.: ULIP: learning a unified representation of language, images, and point clouds for 3D understanding. In: Computer Vision and Pattern Recognition (CVPR), pp. 1179\u20131189 (2023)","DOI":"10.1109\/CVPR52729.2023.00120"},{"key":"18_CR52","doi-asserted-by":"crossref","unstructured":"Yang, C., et al.: MixSKD: self-knowledge distillation from mixup for image recognition. In: European Conference on Computer Vision (ECCV) (2022)","DOI":"10.1007\/978-3-031-20053-3_31"},{"key":"18_CR53","doi-asserted-by":"crossref","unstructured":"Zareian, A., Rosa, K.D., Hu, D.H., Chang, S.F.: Open-vocabulary object detection using captions. In: Computer Vision and Pattern Recognition (CVPR), pp. 14393\u201314402 (2021)","DOI":"10.1109\/CVPR46437.2021.01416"},{"key":"18_CR54","doi-asserted-by":"crossref","unstructured":"Zhang, R., et al.: PointCLIP: point cloud understanding by CLIP. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
8552\u20138562 (2022)","DOI":"10.1109\/CVPR52688.2022.00836"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-73390-1_18","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T16:33:56Z","timestamp":1730306036000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-73390-1_18"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,31]]},"ISBN":["9783031733895","9783031733901"],"references-count":54,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-73390-1_18","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,10,31]]},"assertion":[{"value":"31 October 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}