The use of Large Language Models (LLMs) for code generation has gained significant attention in recent years. Existing methods often aim to improve the quality of generated code by incorporating additional contextual information or guidance into input prompts. Many of these approaches adopt process-oriented reasoning strategies that mimic human step-by-step thinking; such strategies, however, may not always align with the structured nature of programming languages. This paper introduces Chain of Grounded Objectives (CGO), a concise goal-oriented prompting approach that embeds functional objectives into prompts to enhance code generation. By focusing on precisely defined objectives rather than explicit procedural steps, CGO aligns more naturally with programming tasks while retaining flexibility. Empirical evaluations on HumanEval, MBPP, their extended versions, and LiveCodeBench show that CGO achieves accuracy comparable to or better than existing methods while using fewer tokens, making it a more efficient approach to LLM-based code generation.
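To make the contrast with step-by-step prompting concrete, here is a minimal, hypothetical sketch of what a goal-oriented prompt in the spirit of CGO might look like: the prompt lists concise functional objectives alongside the task rather than procedural reasoning steps. The `build_cgo_prompt` helper, the template wording, and the example objectives are illustrative assumptions, not the paper's actual prompt format.

```python
# Hypothetical sketch of a CGO-style prompt builder (assumed, not the
# paper's template): the prompt states grounded functional objectives
# instead of step-by-step reasoning.

def build_cgo_prompt(task_description: str, objectives: list[str]) -> str:
    """Compose a prompt from a task description plus concise objectives."""
    lines = [task_description, "", "Objectives:"]
    lines.extend(f"- {obj}" for obj in objectives)
    lines.extend(["", "Write a Python function that satisfies the objectives above."])
    return "\n".join(lines)


# Example with a HumanEval-style task; the objectives below are hypothetical.
task = (
    "Check whether any two numbers in a given list are closer to each "
    "other than a given threshold."
)
objectives = [
    "Return True iff two distinct elements differ by less than the threshold.",
    "Return False when the list has fewer than two elements.",
    "Leave the input list unmodified.",
]
print(build_cgo_prompt(task, objectives))
```

Because the objectives describe what the function must guarantee rather than how to compute it, a prompt of this shape stays short (fewer tokens than a worked chain of reasoning) while still constraining the generated code, which is consistent with the efficiency claim in the abstract.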
@InProceedings{yeo_et_al:LIPIcs.ECOOP.2025.35,
  author    = {Yeo, Sangyeop and Hwang, Seung-Won and Ma, Yu-Seung},
  title     = {{Chain of Grounded Objectives: Concise Goal-Oriented Prompting for Code Generation}},
  booktitle = {39th European Conference on Object-Oriented Programming (ECOOP 2025)},
  pages     = {35:1--35:25},
  series    = {Leibniz International Proceedings in Informatics (LIPIcs)},
  ISBN      = {978-3-95977-373-7},
  ISSN      = {1868-8969},
  year      = {2025},
  volume    = {333},
  editor    = {Aldrich, Jonathan and Silva, Alexandra},
  publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik},
  address   = {Dagstuhl, Germany},
  URL       = {https://drops.dagstuhl.de/entities/document/10.4230/LIPIcs.ECOOP.2025.35},
  URN       = {urn:nbn:de:0030-drops-233271},
  doi       = {10.4230/LIPIcs.ECOOP.2025.35},
  annote    = {Keywords: Artificial Intelligence, Natural Language Processing, Prompt Design, Large Language Models, Code Generation}
}