@inproceedings{einolghozati-etal-2020-sound,
title = "Sound Natural: Content Rephrasing in Dialog Systems",
author = "Einolghozati, Arash and
Gupta, Anchit and
Diedrick, Keith and
Gupta, Sonal",
editor = "Webber, Bonnie and
Cohn, Trevor and
He, Yulan and
Liu, Yang",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.emnlp-main.414/",
doi = "10.18653/v1/2020.emnlp-main.414",
pages = "5101--5108",
abstract = "We introduce a new task of rephrasing for a more natural virtual assistant. Currently, virtual assistants work in the paradigm of intent-slot tagging and the slot values are directly passed as-is to the execution engine. However, this setup fails in some scenarios such as messaging when the query given by the user needs to be changed before repeating it or sending it to another user. For example, for queries like {\textquoteleft}ask my wife if she can pick up the kids' or {\textquoteleft}remind me to take my pills', we need to rephrase the content to {\textquoteleft}can you pick up the kids' and {\textquoteleft}take your pills'. In this paper, we study the problem of rephrasing with messaging as a use case and release a dataset of 3000 pairs of original query and rephrased query. We show that BART, a pre-trained transformers-based masked language model, is a strong baseline for the task, and show improvements by adding a copy-pointer and copy loss to it. We analyze different trade-offs of BART-based and LSTM-based seq2seq models, and propose a distilled LSTM-based seq2seq as the best practical model"
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="einolghozati-etal-2020-sound">
    <titleInfo>
        <title>Sound Natural: Content Rephrasing in Dialog Systems</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Arash</namePart>
        <namePart type="family">Einolghozati</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Anchit</namePart>
        <namePart type="family">Gupta</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Keith</namePart>
        <namePart type="family">Diedrick</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Sonal</namePart>
        <namePart type="family">Gupta</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2020-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Bonnie</namePart>
            <namePart type="family">Webber</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Trevor</namePart>
            <namePart type="family">Cohn</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Yulan</namePart>
            <namePart type="family">He</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Yang</namePart>
            <namePart type="family">Liu</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Online</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We introduce a new task of rephrasing for a more natural virtual assistant. Currently, virtual assistants work in the paradigm of intent-slot tagging and the slot values are directly passed as-is to the execution engine. However, this setup fails in some scenarios such as messaging when the query given by the user needs to be changed before repeating it or sending it to another user. For example, for queries like ‘ask my wife if she can pick up the kids’ or ‘remind me to take my pills’, we need to rephrase the content to ‘can you pick up the kids’ and ‘take your pills’. In this paper, we study the problem of rephrasing with messaging as a use case and release a dataset of 3000 pairs of original query and rephrased query. We show that BART, a pre-trained transformers-based masked language model, is a strong baseline for the task, and show improvements by adding a copy-pointer and copy loss to it. We analyze different trade-offs of BART-based and LSTM-based seq2seq models, and propose a distilled LSTM-based seq2seq as the best practical model.</abstract>
<identifier type="citekey">einolghozati-etal-2020-sound</identifier>
<identifier type="doi">10.18653/v1/2020.emnlp-main.414</identifier>
<location>
<url>https://aclanthology.org/2020.emnlp-main.414/</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>5101</start>
<end>5108</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Sound Natural: Content Rephrasing in Dialog Systems
%A Einolghozati, Arash
%A Gupta, Anchit
%A Diedrick, Keith
%A Gupta, Sonal
%Y Webber, Bonnie
%Y Cohn, Trevor
%Y He, Yulan
%Y Liu, Yang
%S Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F einolghozati-etal-2020-sound
%X We introduce a new task of rephrasing for a more natural virtual assistant. Currently, virtual assistants work in the paradigm of intent-slot tagging and the slot values are directly passed as-is to the execution engine. However, this setup fails in some scenarios such as messaging when the query given by the user needs to be changed before repeating it or sending it to another user. For example, for queries like ‘ask my wife if she can pick up the kids’ or ‘remind me to take my pills’, we need to rephrase the content to ‘can you pick up the kids’ and ‘take your pills’. In this paper, we study the problem of rephrasing with messaging as a use case and release a dataset of 3000 pairs of original query and rephrased query. We show that BART, a pre-trained transformers-based masked language model, is a strong baseline for the task, and show improvements by adding a copy-pointer and copy loss to it. We analyze different trade-offs of BART-based and LSTM-based seq2seq models, and propose a distilled LSTM-based seq2seq as the best practical model.
%R 10.18653/v1/2020.emnlp-main.414
%U https://aclanthology.org/2020.emnlp-main.414/
%U https://doi.org/10.18653/v1/2020.emnlp-main.414
%P 5101-5108
Markdown (Informal)
[Sound Natural: Content Rephrasing in Dialog Systems](https://aclanthology.org/2020.emnlp-main.414/) (Einolghozati et al., EMNLP 2020)

ACL
Arash Einolghozati, Anchit Gupta, Keith Diedrick, and Sonal Gupta. 2020. Sound Natural: Content Rephrasing in Dialog Systems. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 5101–5108, Online. Association for Computational Linguistics.