@inproceedings{hu-etal-2024-mosel,
title = "{MOSEL}: Inference Serving Using Dynamic Modality Selection",
author = "Hu, Bodun and
Xu, Le and
Moon, Jeongyoon and
Yadwadkar, Neeraja J and
Akella, Aditya",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.emnlp-main.501/",
doi = "10.18653/v1/2024.emnlp-main.501",
pages = "8872--8886",
abstract = "Rapid advancements over the years have helped machine learning models reach previously hard-to-achieve goals, sometimes even exceeding human capabilities. However, achieving desired accuracy comes at the cost of larger model sizes and increased computational demands. Thus, serving predictions from these models to meet any latency and cost requirements of applications remains a key challenge, despite recent work in building inference serving systems as well as algorithmic approaches that dynamically adapt models based on inputs. Our paper introduces a new form of dynamism, modality selection, where we adaptively choose modalities from inference inputs while maintaining the model quality. We introduce MOSEL, an automated inference serving system for multi-modal ML models that carefully picks input modalities per request based on user-defined performance and accuracy requirements. MOSEL exploits modality configurations extensively, improving system throughput by 3.6 $\times$ with an accuracy guarantee. It also reduces job completion times by 11$\times$ compared to modality-agnostic approaches."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="hu-etal-2024-mosel">
<titleInfo>
<title>MOSEL: Inference Serving Using Dynamic Modality Selection</title>
</titleInfo>
<name type="personal">
<namePart type="given">Bodun</namePart>
<namePart type="family">Hu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Le</namePart>
<namePart type="family">Xu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jeongyoon</namePart>
<namePart type="family">Moon</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Neeraja</namePart>
<namePart type="given">J</namePart>
<namePart type="family">Yadwadkar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aditya</namePart>
<namePart type="family">Akella</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yaser</namePart>
<namePart type="family">Al-Onaizan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohit</namePart>
<namePart type="family">Bansal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yun-Nung</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Miami, Florida, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Rapid advancements over the years have helped machine learning models reach previously hard-to-achieve goals, sometimes even exceeding human capabilities. However, achieving desired accuracy comes at the cost of larger model sizes and increased computational demands. Thus, serving predictions from these models to meet any latency and cost requirements of applications remains a key challenge, despite recent work in building inference serving systems as well as algorithmic approaches that dynamically adapt models based on inputs. Our paper introduces a new form of dynamism, modality selection, where we adaptively choose modalities from inference inputs while maintaining the model quality. We introduce MOSEL, an automated inference serving system for multi-modal ML models that carefully picks input modalities per request based on user-defined performance and accuracy requirements. MOSEL exploits modality configurations extensively, improving system throughput by 3.6 × with an accuracy guarantee. It also reduces job completion times by 11× compared to modality-agnostic approaches.</abstract>
<identifier type="citekey">hu-etal-2024-mosel</identifier>
<identifier type="doi">10.18653/v1/2024.emnlp-main.501</identifier>
<location>
<url>https://aclanthology.org/2024.emnlp-main.501/</url>
</location>
<part>
<date>2024-11</date>
<extent unit="page">
<start>8872</start>
<end>8886</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T MOSEL: Inference Serving Using Dynamic Modality Selection
%A Hu, Bodun
%A Xu, Le
%A Moon, Jeongyoon
%A Yadwadkar, Neeraja J.
%A Akella, Aditya
%Y Al-Onaizan, Yaser
%Y Bansal, Mohit
%Y Chen, Yun-Nung
%S Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, Florida, USA
%F hu-etal-2024-mosel
%X Rapid advancements over the years have helped machine learning models reach previously hard-to-achieve goals, sometimes even exceeding human capabilities. However, achieving desired accuracy comes at the cost of larger model sizes and increased computational demands. Thus, serving predictions from these models to meet any latency and cost requirements of applications remains a key challenge, despite recent work in building inference serving systems as well as algorithmic approaches that dynamically adapt models based on inputs. Our paper introduces a new form of dynamism, modality selection, where we adaptively choose modalities from inference inputs while maintaining the model quality. We introduce MOSEL, an automated inference serving system for multi-modal ML models that carefully picks input modalities per request based on user-defined performance and accuracy requirements. MOSEL exploits modality configurations extensively, improving system throughput by 3.6 × with an accuracy guarantee. It also reduces job completion times by 11× compared to modality-agnostic approaches.
%R 10.18653/v1/2024.emnlp-main.501
%U https://aclanthology.org/2024.emnlp-main.501/
%U https://doi.org/10.18653/v1/2024.emnlp-main.501
%P 8872-8886
Markdown (Informal)
[MOSEL: Inference Serving Using Dynamic Modality Selection](https://aclanthology.org/2024.emnlp-main.501/) (Hu et al., EMNLP 2024)
ACL
- Bodun Hu, Le Xu, Jeongyoon Moon, Neeraja J Yadwadkar, and Aditya Akella. 2024. MOSEL: Inference Serving Using Dynamic Modality Selection. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 8872–8886, Miami, Florida, USA. Association for Computational Linguistics.