
Commit fc78cca

Update papers.bib
1 parent 8eb0cfe commit fc78cca

File tree

1 file changed: +11 −17 lines


_bibliography/papers.bib: 11 additions & 17 deletions
@@ -4,26 +4,20 @@
 @string{aps = {American Physical Society,}}

 @inproceedings{wang-etal-2024-towards-human-like,
-title = "Towards Human-Like Machine Comprehension: Few-Shot Relational Learning in Visually-Rich Documents",
-author = "Wang, Hao and
+title ={Towards Human-Like Machine Comprehension: Few-Shot Relational Learning in Visually-Rich Documents},
+author = {Wang, Hao and
 Li, Tang and
 Chu, Chenhui and
 Wang, Rui and
-Zhu, Pinpin",
-editor = "Calzolari, Nicoletta and
-Kan, Min-Yen and
-Hoste, Veronique and
-Lenci, Alessandro and
-Sakti, Sakriani and
-Xue, Nianwen",
-booktitle = "Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)",
-month = may,
-year = "2024",
-address = "Torino, Italia",
-publisher = "ELRA and ICCL",
-url = "https://aclanthology.org/2024.lrec-main.1439",
-pages = "16557--16569",
-abstract = "Key-value relations are prevalent in Visually-Rich Documents (VRDs), often depicted in distinct spatial regions accompanied by specific color and font styles. These non-textual cues serve as important indicators that greatly enhance human comprehension and acquisition of such relation triplets. However, current document AI approaches often fail to consider this valuable prior information related to visual and spatial features, resulting in suboptimal performance, particularly when dealing with limited examples. To address this limitation, our research focuses on few-shot relational learning, specifically targeting the extraction of key-value relation triplets in VRDs. Given the absence of a suitable dataset for this task, we introduce two new few-shot benchmarks built upon existing supervised benchmark datasets. Furthermore, we propose a variational approach that incorporates relational 2D-spatial priors and prototypical rectification techniques. This approach aims to generate relation representations that are more aware of the spatial context and unseen relation in a manner similar to human perception. Experimental results demonstrate the effectiveness of our proposed method by showcasing its ability to outperform existing methods. This study also opens up new possibilities for practical applications.",
+Zhu, Pinpin},
+booktitle = {Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024, CCF-B)},
+month = {may},
+year = {2024},
+address = {Torino, Italia},
+publisher = {ELRA and ICCL},
+url = {https://aclanthology.org/2024.lrec-main.1439},
+pages = {16557--16569},
+abstract = {Key-value relations are prevalent in Visually-Rich Documents (VRDs), often depicted in distinct spatial regions accompanied by specific color and font styles. These non-textual cues serve as important indicators that greatly enhance human comprehension and acquisition of such relation triplets. However, current document AI approaches often fail to consider this valuable prior information related to visual and spatial features, resulting in suboptimal performance, particularly when dealing with limited examples. To address this limitation, our research focuses on few-shot relational learning, specifically targeting the extraction of key-value relation triplets in VRDs. Given the absence of a suitable dataset for this task, we introduce two new few-shot benchmarks built upon existing supervised benchmark datasets. Furthermore, we propose a variational approach that incorporates relational 2D-spatial priors and prototypical rectification techniques. This approach aims to generate relation representations that are more aware of the spatial context and unseen relation in a manner similar to human perception. Experimental results demonstrate the effectiveness of our proposed method by showcasing its ability to outperform existing methods. This study also opens up new possibilities for practical applications.},
 }

 @article{DBLP:journals/ijseke/GuLW23,
