J. Pfister, T. Völker, A. Vlasjuk, and A. Hotho. Proceedings of the 1st Joint Workshop on Large Language Models and Structure Modeling (XLLM 2025), pages 115--128. Vienna, Austria, Association for Computational Linguistics, (August 2025)
Abstract
We revisit the BARTABSA framework for aspect-based sentiment analysis with modern decoder LLMs to assess the importance of explicit structure modeling today. Our updated implementation - BARTABSA++ - features architectural enhancements that boost performance and training stability. Systematic testing with various encoder-decoder architectures shows that BARTABSA++ with BART-Large achieves state-of-the-art results, even surpassing a finetuned GPT-4o model. Our analysis indicates the encoder's representational quality is vital, while the decoder's role is minimal, explaining the limited benefits of scaling decoder-only LLMs for this task. These findings underscore the complementary roles of explicit structured modeling and large language models, indicating structured approaches remain competitive for tasks requiring precise relational information extraction.
%0 Conference Paper
%1 pfister-etal-2025-bartabsa
%A Pfister, Jan
%A Völker, Tom
%A Vlasjuk, Anton
%A Hotho, Andreas
%B Proceedings of the 1st Joint Workshop on Large Language Models and Structure Modeling (XLLM 2025)
%C Vienna, Austria
%D 2025
%E Fei, Hao
%E Tu, Kewei
%E Zhang, Yuhui
%E Hu, Xiang
%E Han, Wenjuan
%E Jia, Zixia
%E Zheng, Zilong
%E Cao, Yixin
%E Zhang, Meishan
%E Lu, Wei
%E Siddharth, N.
%E Øvrelid, Lilja
%E Xue, Nianwen
%E Zhang, Yue
%I Association for Computational Linguistics
%K myown author:vlasjuk author:völker author:hotho from:janpf author:pfister
%P 115--128
%T BARTABSA++: Revisiting BARTABSA with Decoder LLMs
%U https://aclanthology.org/2025.xllm-1.13/
%X We revisit the BARTABSA framework for aspect-based sentiment analysis with modern decoder LLMs to assess the importance of explicit structure modeling today. Our updated implementation - BARTABSA++ - features architectural enhancements that boost performance and training stability. Systematic testing with various encoder-decoder architectures shows that BARTABSA++ with BART-Large achieves state-of-the-art results, even surpassing a finetuned GPT-4o model. Our analysis indicates the encoder's representational quality is vital, while the decoder's role is minimal, explaining the limited benefits of scaling decoder-only LLMs for this task. These findings underscore the complementary roles of explicit structured modeling and large language models, indicating structured approaches remain competitive for tasks requiring precise relational information extraction.
%@ 979-8-89176-286-2
@inproceedings{pfister-etal-2025-bartabsa,
abstract = {We revisit the BARTABSA framework for aspect-based sentiment analysis with modern decoder LLMs to assess the importance of explicit structure modeling today. Our updated implementation - BARTABSA++ - features architectural enhancements that boost performance and training stability. Systematic testing with various encoder-decoder architectures shows that BARTABSA++ with BART-Large achieves state-of-the-art results, even surpassing a finetuned GPT-4o model. Our analysis indicates the encoder{'}s representational quality is vital, while the decoder{'}s role is minimal, explaining the limited benefits of scaling decoder-only LLMs for this task. These findings underscore the complementary roles of explicit structured modeling and large language models, indicating structured approaches remain competitive for tasks requiring precise relational information extraction.},
added-at = {2025-07-25T22:00:17.000+0200},
address = {Vienna, Austria},
author = {Pfister, Jan and V{\"o}lker, Tom and Vlasjuk, Anton and Hotho, Andreas},
biburl = {https://www.bibsonomy.org/bibtex/27d3bb70ca37648d06d79dca543817489/dmir},
booktitle = {Proceedings of the 1st Joint Workshop on Large Language Models and Structure Modeling (XLLM 2025)},
editor = {Fei, Hao and Tu, Kewei and Zhang, Yuhui and Hu, Xiang and Han, Wenjuan and Jia, Zixia and Zheng, Zilong and Cao, Yixin and Zhang, Meishan and Lu, Wei and Siddharth, N. and {\O}vrelid, Lilja and Xue, Nianwen and Zhang, Yue},
interhash = {dfafc307c5664f5a2f6d471450a2296a},
intrahash = {7d3bb70ca37648d06d79dca543817489},
isbn = {979-8-89176-286-2},
keywords = {myown author:vlasjuk author:völker author:hotho from:janpf author:pfister},
month = aug,
pages = {115--128},
publisher = {Association for Computational Linguistics},
timestamp = {2025-07-25T22:00:17.000+0200},
title = {{BARTABSA}++: Revisiting {BARTABSA} with Decoder {LLM}s},
url = {https://aclanthology.org/2025.xllm-1.13/},
year = 2025
}