title = "718"
date = 2022-05-10T21:19:49-05:00
draft = false
["Bob Smith", "David Jones"]
authors = ["Puneet Mathur", "Vlad I Morariu", "Verena Kaynig-Fittkau", "Jiuxiang Gu", "Franck Dernoncourt", "Quan Hung Tran", "Ani Nenkova", "Dinesh Manocha", "Rajiv Jain"]
publication_types = ["1"]
publication = "DocTime: A Document-level Temporal Dependency Graph Parser"
abstract = "We introduce DocTime - a novel temporal dependency graph (TDG) parser that takes as input a text document and produces a temporal dependency graph. It outperforms previous BERT-based solutions by a relative 4-8% on three datasets by modeling the problem as a graph network with path-prediction loss to incorporate longer-range dependencies. This work also demonstrates how the TDG graph can be used to improve the downstream tasks of temporal question answering and NLI by a relative 4-10% with a new framework that incorporates the temporal dependency graph into the self-attention layer of Transformer models (Time-transformer). Finally, we develop and evaluate on a new temporal dependency graph dataset for the domain of contractual documents, which has not been previously explored in this setting."
proceedings = "2022 Annual Conference of the North American Chapter of the Association for Computational Linguistics"
proceedings_short = "NAACL, 2022"
number = ""
start_page = ""
end_page = ""
publisher = ""
image_preview = ""
selected = false
projects = ["deep-learning"]
references content/project/deep-learning.md
.projects = []
projects = []
tags = []
url_pdf = ""
url_preprint = ""
url_code = ""
url_dataset = ""
url_project = ""
url_slides = ""
url_video = ""
url_poster = ""
url_source = ""
math = false