(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[28380],{83313:function(e,r,i){(window.__NEXT_P=window.__NEXT_P||[]).push(["/papers.de",function(){return i(4214)}])},4214:function(e,r,i){"use strict";i.r(r),i.d(r,{__toc:function(){return l}});var n=i(11527),a=i(77154),s=i(51592),t=i(52243);i(44064),i(76948);var o=i(5424);let l=[{depth:2,value:"\xdcberblicke",id:"\xfcberblicke"},{depth:2,value:"Ans\xe4tze",id:"ans\xe4tze"},{depth:2,value:"Anwendungen",id:"anwendungen"},{depth:2,value:"Sammlungen",id:"sammlungen"}];function _createMdxContent(e){let r=Object.assign({h1:"h1",p:"p",h2:"h2",ul:"ul",li:"li",a:"a"},(0,o.a)(),e.components);return(0,n.jsxs)(n.Fragment,{children:[(0,n.jsx)(r.h1,{children:"Papers"}),"\n",(0,n.jsx)(r.p,{children:"Im Folgenden finden Sie die neuesten Papers (sortiert nach Ver\xf6ffentlichungsdatum) zum Thema Prompt-Engineering f\xfcr gro\xdfe Sprachmodelle (Large Language Models, LLMs). Wir aktualisieren die Liste der Papers t\xe4glich/w\xf6chentlich."}),"\n",(0,n.jsx)(r.h2,{id:"\xfcberblicke",children:"\xdcberblicke"}),"\n",(0,n.jsxs)(r.ul,{children:["\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2401.14423",children:"Prompt Design and Engineering: Introduction and Advanced Methods"})," (Januar 2024)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2311.05232",children:"A Survey on Hallucination in Large Language Models: Principles, Taxonomy, Challenges, and Open Questions"})," (November 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2310.06147",children:"An RL Perspective on RLHF, Prompting, and Beyond"})," (Oktober 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.16938",children:"Few-shot Fine-tuning vs. 
In-context Learning: A Fair Comparison and Evaluation"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13860",children:"Jailbreaking ChatGPT via Prompt Engineering: An Empirical Study"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.13712",children:"Harnessing the Power of LLMs in Practice: A Survey on ChatGPT and Beyond"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.08354",children:"Tool Learning with Foundation Models"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.06488",children:"One Small Step for Generative AI, One Giant Leap for AGI: A Complete Survey on ChatGPT in AIGC Era"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.02020",children:"A Bibliometric Review of Large Language Models Research from 2017 to 2023"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.18223",children:"A Survey of Large Language Models"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.14725",children:"Nature Language Reasoning, A Survey"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.07842",children:"Augmented Language Models: a Survey"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2301.00234",children:"A Survey for In-context Learning"})," (Dezember 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2212.10403",children:"Towards Reasoning in Large Language Models: A Survey"})," (Dezember 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2212.09597",children:"Reasoning with Language Model Prompting: A Survey"})," (Dezember 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2206.07682",children:"Emergent Abilities of Large Language Models"})," (Juni 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2204.13988",children:"A Taxonomy of Prompt Modifiers for Text-To-Image Generation"})," (April 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2107.13586",children:"Pre-train, Prompt, and Predict: A Systematic Survey of Prompting Methods in Natural Language Processing"})," (Juli 2021)"]}),"\n"]}),"\n",(0,n.jsx)(r.h2,{id:"ans\xe4tze",children:"Ans\xe4tze"}),"\n",(0,n.jsxs)(r.ul,{children:["\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2312.16171v1",children:"Principled Instructions Are All You Need for Questioning LLaMA-1/2, GPT-3.5/4"})," (Dezember 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2310.05029",children:"Walking Down the Memory Maze: Beyond Context Limit through Interactive Reading"})," (Oktober 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2310.01714",children:"Large Language Models as Analogical Reasoners"})," (Oktober 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2310.05736",children:"LLMLingua: Compressing Prompts for Accelerated Inference of Large Language Models"})," (Oktober 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.06653",children:"Query-Dependent Prompt Evaluation and 
Optimization with Offline Inverse RL"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.11495",children:"Chain-of-Verification Reduces Hallucination in Large Language Models"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.08532",children:"Connecting Large Language Models with Evolutionary Algorithms Yields Powerful Prompt Optimizers"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.04269",children:"From Sparse to Dense: GPT-4 Summarization with Chain of Density Prompting"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.06275",children:"Re-Reading Improves Reasoning in Language Models"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2308.09687v2",children:"Graph of Thoughts: Solving Elaborate Problems with Large Language Models"})," (August 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2307.15337",children:"Skeleton-of-Thought: Large Language Models Can Do Parallel Decoding"})," (Juli 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2306.00369",children:"Focused Prefix Tuning for Controllable Text Generation"})," (Juni 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.19500",children:"Exploring Lottery Prompts for Pre-trained Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.19339",children:"Less Likely Brainstorming: Using Language Models to Generate Alternative Hypotheses"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.20050",children:"Let's Verify Step by Step"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.18787",children:"Universality and Limitations of Prompt Tuning"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.16896",children:"MultiTool-CoT: GPT-3 Can Use Multiple External Tools with Chain of Thought Prompting"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14564v1",children:"PEARL: Prompting Large Language Models to Plan and Execute Actions Over Long Documents"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14992v1",children:"Reasoning with Language Model is Planning with World Model"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13733",children:"Self-Critique Prompting with Large Language Models for Inductive Instructions"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14106",children:"Better Zero-Shot Reasoning with Self-Adaptive Prompting"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14257",children:"Hierarchical Prompting Assists Large Language Model on Web Navigation"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13246",children:"Interactive Natural Language Processing"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12740",children:"Can We Edit Factual Knowledge by In-Context Learning?"})," (Mai 
2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12766",children:"In-Context Learning of Large Language Models Explained as Kernel Regression"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.04091v3",children:"Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12907",children:"Meta-in-context learning in large language models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.11860",children:"Let's Sample Step by Step: Adaptive-Consistency for Efficient Reasoning with LLMs"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.11426",children:"Post Hoc Explanations of Language Models Can Improve Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.11186",children:"Compress, Then Prompt: Improving Accuracy-Efficiency Trade-off of LLM Inference with Transferable Prompt"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.11497",children:"TreePrompt: Learning to Compose Tree Prompts for Explainable Visual Grounding"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.11430",children:"TELeR: A General Taxonomy of LLM Prompts for Benchmarking Complex Tasks"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.11170",children:"Efficient Prompting via Dynamic In-Context Learning"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.10998",children:"The Web Can Be Your Oyster for Improving Large Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.10713",children:"Flatness-Aware Prompt Selection Improves Accuracy and Sample Efficiency"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.10601",children:"Tree of Thoughts: Deliberate Problem Solving with Large Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.10649",children:"ZeroPrompt: Streaming Acoustic Encoders are Zero-Shot Masked LMs"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.10276",children:"Chain-of-Symbol Prompting Elicits Planning in Large Langauge Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.09955",children:"CooK: Empowering General-Purpose Language Models with Modular and Collaborative Knowledge"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.09731",children:'What In-Context Learning "Learns" In-Context: Disentangling Task Recognition and Task Learning'})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.09993",children:"Reprompting: Automated Chain-of-Thought Prompt Inference Through Gibbs Sampling"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.09656",children:"Satisfiability-Aided Language Models Using Declarative Prompting"})," (Mai 
2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.09137",children:"Pre-Training to Learn in Context"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.05970",children:"Boosted Prompt Ensembles for Large Language Models"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.05642",children:"Global Prompt Cell: A Portable Control Module for Effective Prompt"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.03843",children:"Why think step-by-step? Reasoning emerges from the locality of experience"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.03609",children:"Revisiting Automated Prompting: Are We Actually Doing Better?"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.01904",children:"REFINER: Reasoning Feedback on Intermediate Representations"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.11366",children:"Reflexion: an autonomous agent with dynamic memory and self-reflection"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.17760",children:'CAMEL: Communicative Agents for "Mind" Exploration of Large Scale Language Model Society'})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.17651v1",children:"Self-Refine: Iterative Refinement with Self-Feedback"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.13824",children:"kNN Prompting: Beyond-Context Learning with Calibration-Free Nearest Neighbor Inference"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.13283",children:"Visual-Language Prompt Tuning with Knowledge-guided Context Optimization"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.13217",children:"Fairness-guided Few-shot Prompting for Large Language Models"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.11315",children:"Context-faithful Prompting for Large Language Models"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.10475",children:"Is Prompt All You Need? No. 
A Comprehensive and Broader View of Instruction Learning"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.08518",children:"UPRISE: Universal Prompt Retrieval for Improving Zero-Shot Evaluation"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.07320",children:"Model-tuning Via Prompts Makes NLP Models Adversarially Robust"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.03922",children:"Structure Pretraining and Prompt Tuning for Knowledge Graph Transfer"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.03628",children:"CoTEVer: Chain of Thought Prompting Annotation Toolkit for Explanation Verification"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.03846",children:"Larger language models do in-context learning differently"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.02913",children:"OpenICL: An Open-Source Framework for In-context Learning"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.02909",children:"Dynamic Prompting: A Unified Framework for Prompt Tuning"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.09014",children:"ART: Automatic multi-step reasoning and tool-use for large language models"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.02861",children:"Multitask Prompt Tuning Enables Parameter-Efficient Transfer Learning"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.02577",children:"Effectiveness of Data Augmentation for Prefix Tuning with Limited Data"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.01580",children:"Mixture of Soft Prompts for Controllable Data Generation"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.02151",children:"Prompt, Generate, then Cache: Cascade of Foundation Models makes Strong Few-shot Learners"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.00293",children:"How Robust is GPT-3.5 to Predecessors? A Comprehensive Study on Language Understanding Tasks"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/pdf/2302.10198.pdf",children:"Can ChatGPT Understand Too? 
A Comparative Study on ChatGPT and Fine-tuned BERT"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.14838",children:"EvoPrompting: Language Models for Code-Level Neural Architecture Search"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.14691",children:"In-Context Instruction Learning"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.02676",children:"Chain of Hindsight Aligns Language Models with Feedback"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.14045",children:"Language Is Not All You Need: Aligning Perception with Language Models"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.12822",children:"Automatic Prompt Augmentation and Selection with Chain-of-Thought from Labeled Data"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.12246",children:"Active Prompting with Chain-of-Thought for Large Language Models"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.12173",children:"More than you've asked for: A Comprehensive Analysis of Novel Prompt Injection Threats to Application-Integrated Large Language Models"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.11382",children:"A Prompt Pattern Catalog to Enhance Prompt Engineering with ChatGPT"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.11520",children:"Guiding Large Language Models via Directional Stimulus Prompting"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.11521",children:"How Does In-Context Learning Help Prompt Tuning?"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.09236",children:"Scalable Prompt Generation for Semi-supervised Learning with Language Models"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.09185",children:"Bounding the Capabilities of Large Language Models in Open Text Generation with Prompt Constraints"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.07994",children:"\xc0-la-carte Prompt Tuning (APT): Combining Distinct Data Via Composable Prompting"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.08043",children:"GraphPrompt: Unifying Pre-Training and Downstream Tasks for Graph Neural Networks"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.07459",children:"The Capacity for Moral Self-Correction in Large Language Models"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.06868",children:"SwitchPrompt: Learning Domain-Specific Gated Soft Prompts for Classification in Low-Resource Domains"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.05619",children:"Evaluating the Robustness of Discrete Prompts"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.05698",children:"Compositional Exemplars for In-context Learning"})," (Februar 
2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.03668",children:"Hard Prompts Made Easy: Gradient-Based Discrete Optimization for Prompt Tuning and Discovery"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.00923",children:"Multimodal Chain-of-Thought Reasoning in Language Models"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.00093",children:"Large Language Models Can Be Easily Distracted by Irrelevant Context"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.00618",children:"Synthetic Prompting: Generating Chain-of-Thought Demonstrations for Large Language Models"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2301.12314",children:"Progressive Prompts: Continual Learning for Language Models"})," (Januar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2301.08721",children:"Batch Prompting: Efficient Inference with LLM APIs"})," (Januar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2212.14024",children:"Demonstrate-Search-Predict: Composing retrieval and language models for knowledge-intensive NLP"})," (Dezember 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2212.08061",children:"On Second Thought, Let's Not Think Step by Step! Bias and Toxicity in Zero-Shot Reasoning"})," (Dezember 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2212.08073",children:"Constitutional AI: Harmlessness from AI Feedback"})," (Dezember 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2212.04092",children:"Successive Prompting for Decomposing Complex Questions"})," (Dezember 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2212.09561v1",children:"Large Language Models are reasoners with Self-Verification"})," (Dezember 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2212.09251",children:"Discovering Language Model Behaviors with Model-Written Evaluations"})," (Dezember 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2212.06713",children:"Structured Prompting: Scaling In-Context Learning to 1,000 Examples"})," (Dezember 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2211.10435",children:"PAL: Program-aided Language Models"})," (November 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2211.01910",children:"Large Language Models Are Human-Level Prompt Engineers"})," (November 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2211.09527",children:"Ignore Previous Prompt: Attack Techniques For Language Models"})," (November 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2210.07321",children:"Machine Generated Text: A Comprehensive Survey of Threat Models and Detection Methods"})," (November 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2211.09066",children:"Teaching Algorithmic Reasoning via In-context Learning"})," (November 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2211.11875",children:"Enhancing Self-Consistency and Performance of Pre-Trained 
Language Models through Natural Language Inference"})," (November 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://paperswithcode.com/paper/ask-me-anything-a-simple-strategy-for",children:"Ask Me Anything: A simple strategy for prompting language models"})," (Oktober 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2210.01296",children:"Recitation-Augmented Language Models"})," (Oktober 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2210.03629",children:"ReAct: Synergizing Reasoning and Acting in Language Models"})," (Oktober 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2210.09150",children:"Prompting GPT-3 To Be Reliable"})," (Oktober 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2210.02406",children:"Decomposed Prompting: A Modular Approach for Solving Complex Tasks"})," (Oktober 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2210.03493",children:"Automatic Chain of Thought Prompting in Large Language Models"})," (Oktober 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2210.01240v3",children:"Language Models Are Greedy Reasoners: A Systematic Formal Analysis of Chain-of-Thought"})," (Oktober 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2209.02128",children:"Evaluating the Susceptibility of Pre-Trained Language Models via Handcrafted Adversarial Examples"})," (September 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2209.14610",children:"Dynamic Prompt Learning via Policy Gradient for Semi-structured Mathematical Reasoning"})," (September 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2209.11755",children:"Promptagator: Few-shot Dense Retrieval From 8 Examples"})," (September 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2208.03299",children:"Atlas: Few-shot Learning with Retrieval Augmented Language Models"})," (November 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2207.05987",children:"DocPrompting: Generating Code by Retrieving the Docs"})," (Juli 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2206.02336",children:"On the Advance of Making Language Models Better Reasoners"})," (Juni 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2205.11916",children:"Large Language Models are Zero-Shot Reasoners"})," (Mai 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2205.11822",children:"Maieutic Prompting: Logically Consistent Reasoning with Recursive Explanations"})," (Mai 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2205.00445",children:"MRKL Systems: A modular, neuro-symbolic architecture that combines large language models, external knowledge sources and discrete reasoning"})," (Mai 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://aclanthology.org/2022.acl-long.576/",children:"PPT: Pre-trained Prompt Tuning for Few-shot Learning"})," (Mqy 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2205.12390",children:"Toxicity Detection with Generative Prompt-based Inference"})," (Mai 
2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2205.01543",children:"Learning to Transfer Prompts for Text Generation"})," (Mai 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2205.03401",children:"The Unreliability of Explanations in Few-shot Prompting for Textual Reasoning"})," (Mai 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2204.13988",children:"A Taxonomy of Prompt Modifiers for Text-To-Image Generation"})," (April 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2203.06566",children:"PromptChainer: Chaining Large Language Model Prompts through Visual Programming"})," (M\xe4rz 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2203.11171",children:"Self-Consistency Improves Chain of Thought Reasoning in Language Models"})," (M\xe4rz 2022)"]}),"\n",(0,n.jsx)(r.li,{children:(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2203.02155",children:"Training language models to follow instructions with human feedback"})}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2202.12837",children:"Rethinking the Role of Demonstrations: What Makes In-Context Learning Work?"})," (Februar 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2201.11903",children:"Chain of Thought Prompting Elicits Reasoning in Large Language Models"})," (Januar 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2112.00114",children:"Show Your Work: Scratchpads for Intermediate Computation with Language Models"})," (November 2021)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2110.01691",children:"AI Chains: Transparent and Controllable Human-AI Interaction by Chaining Large Language Model Prompts"})," (Oktober 2021)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2110.08387",children:"Generated Knowledge Prompting for Commonsense Reasoning"})," (Oktober 2021)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2110.08207",children:"Multitask Prompted Training Enables Zero-Shot Task Generalization"})," (Oktober 2021)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2109.07830",children:"Reframing Instructional Prompts to GPTk's Language"})," (September 2021)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2109.06977",children:"Design Guidelines for Prompt Engineering Text-to-Image Generative Models"})," (September 2021)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://aclanthology.org/2021.acl-long.295",children:"Making Pre-trained Language Models Better Few-shot Learners"})," (August 2021)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2104.08786",children:"Fantastically Ordered Prompts and Where to Find Them: Overcoming Few-Shot Prompt Order Sensitivity"})," (April 2021)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://aclanthology.org/2021.eacl-main.316",children:"BERTese: Learning to Speak to BERT"})," (April 2021)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2104.08691",children:"The Power of Scale for Parameter-Efficient Prompt Tuning"})," (April 2021)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2102.07350",children:"Prompt Programming for Large Language Models: Beyond 
the Few-Shot Paradigm"})," (Februar 2021)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2102.09690",children:"Calibrate Before Use: Improving Few-Shot Performance of Language Models"})," (Februar 2021)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2101.00190",children:"Prefix-Tuning: Optimizing Continuous Prompts for Generation"})," (Januar 2021)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2101.00420",children:"Learning to Generate Task-Specific Adapters from Task Description"})," (Januar 2021)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2012.15723",children:"Making Pre-trained Language Models Better Few-shot Learners"})," (Dezember 2020)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://aclanthology.org/2020.emnlp-main.105/",children:"Learning from Task Descriptions"})," (November 2020)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2010.15980",children:"AutoPrompt: Eliciting Knowledge from Language Models with Automatically Generated Prompts"})," (Oktober 2020)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2005.14165",children:"Language Models are Few-Shot Learners"})," (Mai 2020)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://direct.mit.edu/tacl/article/doi/10.1162/tacl_a_00324/96460/How-Can-We-Know-What-Language-Models-Know",children:"How Can We Know What Language Models Know?"})," (Juli 2020)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2001.08361",children:"Scaling Laws for Neural Language Models"})," (Januar 2020)"]}),"\n"]}),"\n",(0,n.jsx)(r.h2,{id:"anwendungen",children:"Anwendungen"}),"\n",(0,n.jsxs)(r.ul,{children:["\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2310.09265",children:"PromptRE: Weakly-Supervised Document-Level Relation Extraction via Prompting-Based Data Programming"})," (Oktober 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2310.08395",children:"Prompting Large Language Models with Chain-of-Thought for Few-Shot Knowledge Base Question Generation"})," (Oktober 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2310.08123",children:"Who Wrote it and Why? 
Prompting Large-Language Models for Authorship Verification"})," (Oktober 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2310.08101",children:"Promptor: A Conversational and Autonomous Prompt Generation Agent for Intelligent Text Entry Techniques"})," (Oktober 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2310.03965",children:"Thought Propagation: An Analogical Approach to Complex Reasoning with Large Language Models"})," (Oktober 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.04269",children:"From Sparse to Dense: GPT-4 Summarization with Chain of Density Prompting"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2310.02304",children:"Self-Taught Optimizer (STOP): Recursively Self-Improving Code Generation"})," (Oktober 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2310.02226",children:"Think before you speak: Training Language Models With Pause Tokens"})," (Oktober 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2310.00867",children:"(Dynamic) Prompting might be all you need to repair Compressed LLMs"})," (Oktober 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2310.00313",children:"In-Context Learning in Large Language Models: A Neuroscience-inspired Analysis of Representations"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2310.00297",children:"Understanding In-Context Learning from Repetitions"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2310.00272",children:"Investigating the Efficacy of Large Language Models in Reflective Assessment Methods through Chain of Thoughts Prompting"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2310.00152",children:"Automatic Prompt Rewriting for Personalized Text Generation"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.17453",children:"Efficient Streaming Language Models with Attention Sinks"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.17421",children:"The Dawn of LMMs: Preliminary Explorations with GPT-4V(ision)"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.15427",children:"Graph Neural Prompting with Large Language Models"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.15025",children:"Large Language Model Alignment: A Survey"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.13339",children:"Enhancing Zero-Shot Chain-of-Thought Reasoning in Large Language Models through Logic"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.13205",children:"A Practical Survey on Zero-shot Prompt Design for In-context Learning"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.10687",children:"EchoPrompt: Instructing the Model to Rephrase Queries for Improved In-context Learning"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.10359",children:"Prompt, Condition, and 
Generate: Classification of Unsupported Claims with In-Context Learning"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.10238",children:"PolicyGPT: Automated Analysis of Privacy Policies with Large Language Models"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.09708",children:"LLM4Jobs: Unsupervised occupation extraction and standardization leveraging Large Language Models"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.09558",children:"Summarization is (Almost) Dead"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.09444",children:"Investigating Zero- and Few-shot Generalization in Fact Verification"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.09338",children:"Performance of the Pre-Trained Large Language Model GPT-4 on Automated Short Answer Grading"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.09117",children:"Contrastive Decoding Improves Reasoning in Large Language Models"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.08963",children:"Struc-Bench: Are Large Language Models Really Good at Generating Complex Structured Data?"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.08590",children:"Neural Machine Translation Models Can Learn to be Few-shot Learners"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.08589",children:"Chain-of-Thought Reasoning is a Policy Improvement Operator"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.08583",children:"ICLEF: In-Context Learning with Expert Feedback for Explainable Style Transfer"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.08541",children:"When do Generative Query and Document Expansions Fail? 
A Comprehensive Study Across Methods, Retrievers, and Datasets"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.08491",children:"Using Large Language Models for Knowledge Engineering (LLMKE): A Case Study on Wikidata"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.08303",children:"Self-Consistent Narrative Prompts on Abductive Natural Language Inference"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.08210",children:"Investigating Answerability of LLMs for Long-Form Question Answering"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.08140",children:"PromptTTS++: Controlling Speaker Identity in Prompt-Based Text-to-Speech Using Natural Language Descriptions"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.08008",children:"An Empirical Evaluation of Prompting Strategies for Large Language Models in Zero-Shot Clinical Natural Language Processing"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.07990",children:"Leveraging Contextual Information for Effective Entity Salience Detection"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.06135",children:"Prompting4Debugging: Red-Teaming Text-to-Image Diffusion Models by Finding Problematic Prompts"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.05833",children:"PACE: Prompting and Augmentation for Calibrated Confidence Estimation with GPT-4 in Cloud Incident Root Cause Analysis"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.04269",children:"From Sparse to Dense: GPT-4 Summarization with Chain of Density Prompting"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.04461",children:"Measuring and Improving Chain-of-Thought Reasoning in Vision-Language Models"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.02654",children:"Zero-Resource Hallucination Prevention for Large Language Models"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.02772",children:"Certifying LLM Safety against Adversarial Prompting"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2309.02772",children:"Improving Code Generation by Dynamic Temperature Sampling"})," (September 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2308.13479",children:"Prompting a Large Language Model to Generate Diverse Motivational Messages: A Comparison with Human-Written Messages"})," (August 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2308.13032",children:"Financial News Analytics Using Fine-Tuned Llama 2 GPT Model"})," (August 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2308.10335",children:"A Study on Robustness and Reliability of Large Language Model Code Generation"})," (August 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2308.12890",children:"Large Language Models Vote: Prompting for Rare Disease Identification"})," 
(August 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2308.09583",children:"WizardMath: Empowering Mathematical Reasoning for Large Language Models via Reinforced Evol-Instruct"})," (August 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2308.09658",children:"Tree-of-Mixed-Thought: Combining Fast and Slow Thinking for Multi-hop Visual Reasoning"})," (August 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2308.09687",children:"Graph of Thoughts: Solving Elaborate Problems with Large Language Models"})," (August 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2308.09662",children:"Red-Teaming Large Language Models using Chain of Utterances for Safety-Alignment"})," (August 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2308.08614",children:"Boosting Logical Reasoning in Large Language Models through a New Framework: The Graph of Thought"})," (August 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2308.05596",children:"You Only Prompt Once: On the Capabilities of Prompt Learning on Large Language Models to Tackle Toxic Content"})," (August 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2308.05481",children:"LLM As DBA"})," (August 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2306.00784",children:"Interpretable Math Word Problem Solution Generation Via Step-by-step Planning"})," (Juni 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2306.00774",children:"In-Context Learning User Simulators for Task-Oriented Dialog Systems"})," (Juni 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2306.00739",children:"SQL-PaLM: Improved Large Language ModelAdaptation for Text-to-SQL"})," (Juni 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2306.00618",children:"Effective Structured Prompting by Meta-Learning and Representative Verbalizer"})," (Juni 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2306.00526",children:"Layout and Task Aware Instruction Prompt for Zero-shot Document Image Question Answering"})," (Juni 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2306.00550",children:"Chain-Of-Thought Prompting Under Streaming Batch: A Case Study"})," (Juni 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.19713",children:"Red Teaming Language Model Detectors with Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://shishirpatil.github.io/gorilla/",children:"Gorilla: Large Language Model Connected with Massive APIs"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.19835",children:"Deliberate then Generate: Enhanced Prompting Framework for Text Generation"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.19597",children:'What does the Failure to Reason with "Respectively" in Zero/Few-Shot Settings Tell Us about Language Models?'})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.19426",children:"ScoNe: Benchmarking Negation Reasoning in Language Models With Fine-Tuning and In-Context Learning"})," (Mai 
2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.19308",children:"SheetCopilot: Bringing Software Productivity to the Next Level through Large Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.19234",children:"Grammar Prompting for Domain-Specific Language Generation with Large Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.19148",children:"Mitigating Label Biases for In-context Learning"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.18638",children:"Short Answer Grading Using One-shot Prompting and Text Similarity Scoring Model"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.19165",children:"Strategic Reasoning with Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.18869",children:"Dissecting Chain-of-Thought: A Study on Compositional In-Context Learning of MLPs"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.18189",children:"Marked Personas: Using Natural Language Prompts to Measure Stereotypes in Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.18170",children:"Leveraging Training Data in Few-Shot Prompting for Numerical Reasoning"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.18156",children:"Exploring Effectiveness of GPT-3 in Grammatical Error Correction: A Study on Performance and Controllability in Prompt-Based Methods"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.17826",children:"NOTABLE: Transferable Backdoor Attacks Against Prompt-based NLP Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.17812",children:"Tab-CoT: Zero-shot Tabular Chain of Thought"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.17680",children:"Evaluating GPT-3 Generated Explanations for Hateful Content Moderation"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.17653",children:"Prompt-Guided Retrieval Augmentation for Non-Knowledge-Intensive Tasks"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:["[Zero- and Few-Shot Event Detection via Prompt-Based Meta Learning]",(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.17373",children:"https://arxiv.org/abs/2305.17373"}),") (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.17306",children:"Chain-of-Thought Hub: A Continuous Effort to Measure Large Language Models' Reasoning Performance"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.17256",children:"Large Language Models Can be Lazy Learners: Analyze Shortcuts in In-Context Learning"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.17147",children:"Heterogeneous Value Evaluation for Large Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.17104",children:"PromptNER: Prompt Locating and Typing for Named Entity Recognition"})," (Mai 
2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13514v1",children:"Small Language Models Improve Giants by Rewriting Their Outputs"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.15771v1",children:"On the Planning Abilities of Large Language Models -- A Critical Investigation"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.16582",children:"Beyond Chain-of-Thought, Effective Graph-of-Thought Reasoning in Large Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12600v1",children:"PRODIGY: Enabling In-context Learning Over Graphs"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.15525v1",children:"Large Language Models are Few-Shot Health Learners"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.16367",children:"Role-Play with Large Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13299v1",children:"Measuring Inductive Biases of In-Context Learning with Underspecified Demonstrations"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12744v1",children:"Fact-Checking Complex Claims with Program-Guided Reasoning"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.17126v1",children:"Large Language Models as Tool Makers"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13016v2",children:"Iterative Forward Tuning Boosts In-context Learning in Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.17390v1",children:"SwiftSage: A Generative Agent with Fast and Slow Thinking for Complex Interactive Tasks"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13246v1",children:"Interactive Natural Language Processing"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.02897v1",children:"An automatically discovered chain-of-thought prompt generalizes to novel models and datasets"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.08291v1",children:"Large Language Model Guided Tree-of-Thought"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.06983v1",children:"Active Retrieval Augmented Generation"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12544v1",children:"A PhD Student's Perspective on Research in NLP in the Era of Very Large Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.02317v1",children:"Visual Chain of Thought: Bridging Logical Gaps with Multimodal Infillings"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.09800v1",children:"Mirages: On Anthropomorphism in Dialogue Systems"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.15324v1",children:"Model evaluation for extreme risks"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.04388v1",children:"Language Models 
Don't Always Say What They Think: Unfaithful Explanations in Chain-of-Thought Prompting"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.02466v1",children:"Cognitive Reframing of Negative Thoughts through Human-Language Model Interaction"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13723",children:"PromptClass: Weakly-Supervised Text Classification with Prompting Enhanced Noise-Robust Self-Training"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.04757v2",children:"Augmented Large Language Models with Parametric Knowledge Guiding"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13735",children:"Aligning Large Language Models through Synthetic Feedback"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13775",children:"Concept-aware Training Improves In-context Learning Ability of Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.05176v1",children:"FrugalGPT: How to Use Large Language Models While Reducing Cost and Improving Performance"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13785",children:"Enhancing Black-Box Few-Shot Text Classification with Prompt-Based Data Augmentation"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13817",children:"Detecting automatically the layout of clinical documents to enhance the performances of downstream natural language processing"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13826",children:'"Is the Pope Catholic?" Applying Chain-of-Thought Reasoning to Understanding Conversational Implicatures'})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13903",children:"Let's Think Frame by Frame: Evaluating Video Chain of Thought with Video Infilling and Prediction"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13917",children:"Generating Data for Symbolic Language with Large Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13972",children:"Make a Choice! 
Knowledge Base Question Answering with In-Context Learning"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14002",children:"Improving Language Models via Plug-and-Play Retrieval Feedback"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14006",children:"Multi-Granularity Prompts for Topic Shift Detection in Dialogue"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14045",children:"The CoT Collection: Improving Zero-shot and Few-shot Learning of Language Models via Chain-of-Thought Fine-Tuning"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14057",children:"Can Language Models Understand Physical Concepts?"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14069",children:"Evaluating Factual Consistency of Summaries with Large Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14128",children:"Dr.ICL: Demonstration-Retrieved In-context Learning"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14171",children:"Probing in Context: Toward Building Robust Classifiers via Probing Large Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14210",children:"Skill-Based Few-Shot Selection for In-Context Learning"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14215",children:"Exploring Chain-of-Thought Style Prompting for Text-to-SQL"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14233",children:"Enhancing Chat Language Models by Scaling High-quality Instructional Conversations"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14239",children:"On Learning to Summarize with Large Language Models as References"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14259",children:"Learning to Generate Novel Scientific Directions with Contextualized Literature-based Discovery"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14264",children:"Active Learning Principles for In-Context Learning with Large Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14279",children:"Two Failures of Self-Consistency in the Multi-Step Reasoning of LLMs"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14325",children:"Improving Factuality and Reasoning in Language Models through Multiagent Debate"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14323",children:"ChatCoT: Tool-Augmented Chain-of-Thought Reasoning on\\ Chat-based Large Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14292",children:"WikiChat: A Few-Shot LLM-Based Chatbot Grounded with Wikipedia"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.14283",children:"Query Rewriting for Retrieval-Augmented Large Language Models"})," (Mai 
2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13729",children:"Discrete Prompt Optimization via Constrained Generation for Zero-shot Re-ranker"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13412",children:"Element-aware Summarization with Large Language Models: Expert-aligned Evaluation and Chain-of-Thought Method"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13514",children:"Small Language Models Improve Giants by Rewriting Their Outputs"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13626",children:"Prompting and Evaluating Large Language Models for Proactive Dialogues: Clarification, Target-guided, and Non-collaboration"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13660",children:"Prompt-Based Monte-Carlo Tree Search for Goal-Oriented Dialogue Policy Planning"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13669",children:"Mitigating Language Model Hallucination with Interactive Question-Knowledge Alignment"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13068",children:"Making Language Models Better Tool Learners with Execution Feedback"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13073",children:"Text-to-SQL Error Correction with Language Models of Code"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13085",children:"Decomposed Prompting for Machine Translation Between Related Languages using Large Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13235",children:"SPARSEFIT: Few-shot Prompting with Sparse Fine-tuning for Jointly Generating Predictions and Natural Language Explanations"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13252",children:'"According to ..." 
Prompting Language Models Improves Quoting from Pre-Training Data'})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13264",children:"Prompt-based methods may underestimate large language models' linguistic generalizations"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13269",children:"Chain of Knowledge: A Framework for Grounding Large Language Models with Structured Knowledge Bases"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.13299",children:"Measuring Inductive Biases of In-Context Learning with Underspecified Demonstrations"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12576",children:"Automated Few-shot Classification with Instruction-Finetuned Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12586",children:"Enhancing Few-shot Text-to-SQL Capabilities of Large Language Models: A Study on Prompt Design Strategies"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12627",children:"MvP: Multi-view Prompting Improves Aspect Sentiment Tuple Prediction"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12696",children:"Learning Interpretable Style Embeddings via Prompting LLMs"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12723",children:"Enhancing Small Medical Learners with Privacy-preserving Contextual Prompting"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12744",children:"Fact-Checking Complex Claims with Program-Guided Reasoning"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12749",children:"A Benchmark on Extremely Weakly Supervised Text Classification: Reconcile Seed Matching and Prompting Approaches"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12757",children:"This Prompt is Measuring <MASK>: Evaluating Bias Evaluation in Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12761",children:"Enhancing Cross-lingual Natural Language Inference by Soft Prompting with Multilingual Verbalizer"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12900",children:"Evaluating Prompt-based Question Answering for Object Prediction in the Open Research Knowledge Graph"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12535",children:"Explaining How Transformers Use Context to Build Predictions"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12392",children:"PiVe: Prompting with Iterative Verification Improving Graph-based Generative Capability of LLMs"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12217",children:"PromptNER: A Prompting Method for Few-shot Named Entity Recognition via k Nearest Neighbor Search"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12295",children:"Logic-LM: Empowering Large Language Models with Symbolic Solvers for Faithful Logical Reasoning"})," (Mai 
2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.11791",children:"Enhancing Few-shot NER with Prompt Ordering based Data Augmentation"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.11792",children:"Chain-of-thought prompting for responding to in-depth dialogue questions with LLM"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.11853",children:"How to Prompt LLMs for Text-to-SQL: A Study in Zero-shot, Single-domain, and Cross-domain Settings"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.11991",children:"Evaluation of medium-large Language Models at zero-shot closed book generative question answering"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12077",children:"Few-Shot Dialogue Summarization via Skeleton-Assisted Prompt Transfer"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.12096",children:"Can NLP Models Correctly Reason Over Contexts that Break the Common Assumptions?"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.11255",children:"Reasoning Implicit Sentiment with Chain-of-Thought Prompting"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.11334",children:"Writing your own book: A method for going from closed to open book QA to improve robustness and performance of smaller LLMs"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.11366",children:"AutoTrial: Prompting Language Models for Clinical Trial Design"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.11738",children:"CRITIC: Large Language Models Can Self-Correct with Tool-Interactive Critiquing"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.11759",children:"Controlling the Extraction of Memorized Data from Large Language Models via Prompt-Tuning"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.11790",children:"Prompting with Pseudo-Code Instructions"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.11171",children:"TrueTeacher: Learning Factual Consistency Evaluation with Large Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.11159",children:"Aligning Instruction Tasks Unlocks Large Language Models as Zero-Shot Relation Extractors"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.11140",children:"Exploiting Biased Models to De-bias Text: A Gender-Fair Rewriting Model"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.11038",children:"Learning In-context Learning for Named Entity Recognition"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.10907",children:"Take a Break in the Middle: Investigating Subgoals towards Hierarchical Script Generation"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.10866",children:"TEPrompt: Task Enlightenment Prompt Learning for Implicit Discourse Relation Recognition"})," (Mai 
2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.10847",children:"Large Language Models can be Guided to Evade AI-Generated Text Detection"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.10613",children:"Temporal Knowledge Graph Forecasting Without Knowledge Using In-Context Learning"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.11095",children:"Prompting the Hidden Talent of Web-Scale Speech Models for Zero-Shot Task Generalization"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.10679",children:"Think Outside the Code: Brainstorming Boosts Large Language Models in Code Generation"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.10142",children:"Improving Language Model Negotiation with Self-Play and In-Context Learning from AI Feedback"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.09770",children:"ConvXAI: Delivering Heterogeneous AI Explanations via Conversations to Support Human-AI Scientific Writing"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.09645",children:"StructGPT: A General Framework for Large Language Model to Reason over Structured Data"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.09617",children:"Towards Expert-Level Medical Question Answering with Large Language Models"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.09612",children:"Large Language Models are Built-in Autoregressive Search Engines"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.09335",children:"MsPrompt: Multi-step Prompt Learning for Debiasing Few-shot Event Detection"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.09312",children:"Exploring the Impact of Layer Normalization for Zero-shot Neural Machine Translation"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.09067",children:"SGP-TOD: Building Task Bots Effortlessly via Schema-Guided LLM Prompting"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.09333",children:"Multi-modal Visual Understanding with Prompts for Semantic Information Disentanglement of Image"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2305.09025",children:"Soft Prompt Decoding for Multilingual Dense Retrieval"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://ai.google/static/documents/palm2techreport.pdf",children:"PaLM 2 Technical Report"})," (Mai 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.06556",children:"Are LLMs All You Need for Task-Oriented Dialogue?"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.05973",children:"HiPrompt: Few-Shot Biomedical Knowledge Fusion via Hierarchy-Oriented Prompting"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.05253",children:"Approximating Human Evaluation of Social Chatbots with Prompting"})," (April 
2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.04616",children:"Automated Reading Passage Generation with OpenAI's Large Language Model"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.04358",children:"WebBrain: Learning to Generate Factually Correct Articles for Queries by Grounding on Large Web Corpus"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.04704",children:"Prompt Pre-Training with Twenty-Thousand Classes for Open-Vocabulary Visual Recognition"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.02819",children:"GPT detectors are biased against non-native English writers"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.03153",children:"Zero-Shot Next-Item Recommendation using Large Pretrained Language Models"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.02213",children:"Large Language Models as Master Key: Unlocking the Secrets of Materials Science with GPT"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.01295",children:"Efficiently Aligned Cross-Lingual Transfer Learning for Conversational Tasks using Prompt-Tuning"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.01228",children:"Better Language Models of Code through Self-Improvement"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.01209",children:"PromptORE -- A Novel Approach Towards Fully Unsupervised Relation Extraction"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"",children:"Assessing Language Model Deployment with Risk Cards"})," (April 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2304.00116",children:"Enhancing Large Language Models with Climate Resources"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.17564",children:"BloombergGPT: A Large Language Model for Finance"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.17408",children:"Medical Intervention Duration Estimation Using Language-enhanced Transformer Encoder with Medical Prompts"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.15846",children:"Soft-prompt tuning to predict lung cancer using primary care free-text Dutch medical notes"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.16434",children:"TaskMatrix.AI: Completing Tasks by Connecting Foundation Models with Millions of APIs"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.16445",children:"Larger Probes Tell a Different Story: Extending Psycholinguistic Datasets Via In-Context Learning"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.15587",children:"Linguistically Informed ChatGPT Prompts to Enhance Japanese-Chinese Machine Translation: A Case Study on Attributive Clauses"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.14375",children:"Knowledge-augmented Frame Semantic Parsing with Hybrid 
Prompt-tuning"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.15413",children:"Debiasing Scores and Prompts of 2D Diffusion for Robust Text-to-3D Generation"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.15441#",children:"Zero-shot Model Diagnosis"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.13592",children:"Prompting Large Language Models to Generate Code-Mixed Texts: The Case of South East Asian Languages"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.13035",children:"SPeC: A Soft Prompt-Based Calibration on Mitigating Performance Variability in Clinical Notes Summarization"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.11455",children:"Large Language Models and Simple, Stupid Bugs"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.09325",children:"Can Generative Pre-trained Transformers (GPT) Pass Assessments in Higher Education Programming Courses?"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.08896",children:"SelfCheckGPT: Zero-Resource Black-Box Hallucination Detection for Generative Large Language Models"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.07142",children:"Large Language Models in the Workplace: A Case Study on Prompt Engineering for Job Type Classification"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.05063",children:"ICL-D3IE: In-Context Learning with Diverse Demonstrations Updating for Document Information Extraction"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.05398",children:"MathPrompter: Mathematical Reasoning using Large Language Models"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.05400",children:"Prompt-Based Learning for Thread Structure Prediction in Cybersecurity Forums"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.03199",children:"Choice Over Control: How Users Write with Large Language Models using Diegetic and Non-Diegetic Prompting"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.01903",children:"Prompting Large Language Models with Answer Heuristics for Knowledge-based Visual Question Answering"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.00815",children:"Soft Prompt Guided Joint Learning for Cross-Domain Sentiment Analysis"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2303.00733",children:"SpeechPrompt v2: Prompt Tuning for Speech Classification Tasks"})," (M\xe4rz 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.14233",children:"Goal Driven Discovery of Distributional Differences via Language Descriptions"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.13439",children:"Navigating the Grey Area: Expressions of Overconfidence and Uncertainty in Language Models"})," (Februar 
2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.14169",children:"TabGenie: A Toolkit for Table-to-Text Generation"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.12449",children:"SGL-PT: A Strong Graph Learner with Graph Prompt Tuning"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.12468",children:"Few-Shot Table-to-Text Generation with Prompt-based Adapter"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.12692",children:"Language Models Are Few-shot Learners for Prognostic Prediction"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.12784",children:"STA: Self-controlled Text Augmentation for Improving Text Classifications"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.12813",children:"Check Your Facts and Try Again: Improving Large Language Models with External Knowledge and Automated Feedback"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.10916",children:"How Generative AI models such as ChatGPT can be (Mis)Used in SPC Practice, Education, and Research? An Exploratory Study"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.08961",children:"Grimm in Wonderland: Prompt Engineering with Midjourney to Illustrate Fairytales"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.08068",children:"LabelPrompt: Effective Prompt-based Learning for Relation Classification"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.09236",children:"Language Model Crossover: Variation through Few-Shot Prompting"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.08102",children:"Prompt Tuning of Deep Neural Networks for Speaker-adaptive Visual Speech Recognition"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.07459",children:"The Capacity for Moral Self-Correction in Large Language Models"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.04156",children:"Prompting for Multimodal Hateful Meme Classification"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.03269",children:"PLACES: Prompting Language Models for Social Conversation Synthesis"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.04761",children:"Toolformer: Language Models Can Teach Themselves to Use Tools"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2302.01441",children:"Commonsense-Aware Prompting for Controllable Empathetic Dialogue Generation"})," (Februar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2301.12810",children:"Crawling the Internal Knowledge-Base of Language Models"})," (Januar 2023)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2212.02199",children:"Legal Prompt Engineering for Multilingual Legal Judgement Prediction"})," (Dezember 
2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2211.15462",children:"Investigating Prompt Engineering in Diffusion Models"})," (November 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2209.09513v2",children:"Learn to Explain: Multimodal Reasoning via Thought Chains for Science Question Answering"})," (September 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2210.15157",children:"Conversing with Copilot: Exploring Prompt Engineering for Solving CS1 Problems Using Natural Language"})," (Oktober 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2210.14699",children:"Piloting Copilot and Codex: Hot Temperature, Cold Prompts, or Black Magic?"})," (Oktober 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://aclanthology.org/2022.inlg-main.5",children:"Plot Writing From Scratch Pre-Trained Language Models"})," (Juli 2022)"]}),"\n",(0,n.jsxs)(r.li,{children:[(0,n.jsx)(r.a,{href:"https://arxiv.org/abs/2202.03629",children:"Survey of Hallucination in Natural Language Generation"})," (Februar 2022)"]}),"\n"]}),"\n",(0,n.jsx)(r.h2,{id:"sammlungen",children:"Sammlungen"}),"\n",(0,n.jsxs)(r.ul,{children:["\n",(0,n.jsx)(r.li,{children:(0,n.jsx)(r.a,{href:"https://github.com/Timothyxxx/Chain-of-ThoughtsPapers",children:"Chain-of-Thought Papers"})}),"\n",(0,n.jsx)(r.li,{children:(0,n.jsx)(r.a,{href:"https://paperswithcode.com/task/prompt-engineering",children:"Papers mit Code"})}),"\n",(0,n.jsx)(r.li,{children:(0,n.jsx)(r.a,{href:"https://github.com/thunlp/PromptPapers#papers",children:"Prompt Papers"})}),"\n"]})]})}let h={MDXContent:function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},{wrapper:r}=Object.assign({},(0,o.a)(),e.components);return r?(0,n.jsx)(r,{...e,children:(0,n.jsx)(_createMdxContent,{...e})}):_createMdxContent(e)},pageOpts:{filePath:"pages/papers.de.mdx",route:"/papers",timestamp:172589822e4,pageMap:[{kind:"Meta",locale:"de",data:{index:"Prompt Engineering",introduction:"Einleitung",techniques:"Techniken",applications:"Anwendungen",prompts:"Prompt Hub",models:"Modelle",risks:"Risiken & Missbrauch",research:"LLM Forschungsergebnisse",papers:"Papers",tools:"Werkzeuge & Bibliotheken",notebooks:"Notebooks",datasets:"Datens\xe4tze",readings:"Zusatzlekt\xfcre",course:{title:"Prompt Engineering Kurs",type:"page"},services:{title:"Services",type:"page"},about:{title:"\xdcber",type:"page"}}},{kind:"MdxPage",name:"about",route:"/about",locale:"de"},{kind:"Folder",name:"applications",route:"/applications",children:[{kind:"Meta",locale:"de",data:{function_calling:"Funktionsaufrufe",generating:"Generierung von Daten",synthetic_rag:"Generierung eines synthetischen Datensatzes f\xfcr RAG",generating_textbooks:"Umgang mit generierten Datens\xe4tzen und deren Vielfalt",coding:"Codegenerierung",workplace_casestudy:"Fallstudie zur Klassifizierung von 
Absolventenjobs",pf:"Prompt-Funktion"}},{kind:"MdxPage",name:"coding",route:"/applications/coding",locale:"de"},{kind:"MdxPage",name:"function_calling",route:"/applications/function_calling",locale:"de"},{kind:"MdxPage",name:"generating",route:"/applications/generating",locale:"de"},{kind:"MdxPage",name:"generating_textbooks",route:"/applications/generating_textbooks",locale:"de"},{kind:"MdxPage",name:"pf",route:"/applications/pf",locale:"de"},{kind:"MdxPage",name:"synthetic_rag",route:"/applications/synthetic_rag",locale:"de"},{kind:"MdxPage",name:"workplace_casestudy",route:"/applications/workplace_casestudy",locale:"de"},{kind:"MdxPage",name:"context-caching",route:"/applications/context-caching",locale:"en"},{kind:"MdxPage",name:"finetuning-gpt4o",route:"/applications/finetuning-gpt4o",locale:"en"}]},{kind:"MdxPage",name:"applications",route:"/applications",locale:"de"},{kind:"MdxPage",name:"course",route:"/course",locale:"de"},{kind:"MdxPage",name:"datasets",route:"/datasets",locale:"de"},{kind:"Folder",name:"guides",route:"/guides",children:[{kind:"Meta",locale:"en",data:{"optimizing-prompts":"Optimizing Prompts"}},{kind:"MdxPage",name:"optimizing-prompts",route:"/guides/optimizing-prompts",locale:"en"}]},{kind:"MdxPage",name:"index",route:"/",locale:"de"},{kind:"Folder",name:"introduction",route:"/introduction",children:[{kind:"Meta",locale:"de",data:{settings:"LLM Einstellungen",basics:"Grundlagen des Promptings",elements:"Elemente eines Prompts",tips:"Allgemeine Tipps f\xfcr das Entwerfen von Prompts",examples:"Beispiel f\xfcr Prompts"}},{kind:"MdxPage",name:"basics",route:"/introduction/basics",locale:"de"},{kind:"MdxPage",name:"elements",route:"/introduction/elements",locale:"de"},{kind:"MdxPage",name:"examples",route:"/introduction/examples",locale:"de"},{kind:"MdxPage",name:"settings",route:"/introduction/settings",locale:"de"},{kind:"MdxPage",name:"tips",route:"/introduction/tips",locale:"de"}]},{kind:"MdxPage",name:"introduction",route:"/introduction",locale:"de"},{kind:"Folder",name:"models",route:"/models",children:[{kind:"Meta",locale:"de",data:{chatgpt:"ChatGPT","claude-3":"Claude 3","code-llama":"Code Llama",flan:"Flan",gemini:"Gemini","gemini-advanced":"Gemini Advanced","gemini-pro":"Gemini 1.5 Pro",gemma:"Gemma","gpt-4":"GPT-4","grok-1":"Grok-1",llama:"LLaMA","llama-3":"Llama 3","mistral-7b":"Mistral 7B","mistral-large":"Mistral Large",mixtral:"Mixtral","mixtral-8x22b":"Mixtral 
8x22B",olmo:"OLMo","phi-2":"Phi-2",sora:"Sora",collection:"LLM-Sammlung"}},{kind:"MdxPage",name:"chatgpt",route:"/models/chatgpt",locale:"de"},{kind:"MdxPage",name:"claude-3",route:"/models/claude-3",locale:"de"},{kind:"MdxPage",name:"code-llama",route:"/models/code-llama",locale:"de"},{kind:"MdxPage",name:"collection",route:"/models/collection",locale:"de"},{kind:"MdxPage",name:"flan",route:"/models/flan",locale:"de"},{kind:"MdxPage",name:"gemini-advanced",route:"/models/gemini-advanced",locale:"de"},{kind:"MdxPage",name:"gemini-pro",route:"/models/gemini-pro",locale:"de"},{kind:"MdxPage",name:"gemini",route:"/models/gemini",locale:"de"},{kind:"MdxPage",name:"gemma",route:"/models/gemma",locale:"de"},{kind:"MdxPage",name:"gpt-4",route:"/models/gpt-4",locale:"de"},{kind:"MdxPage",name:"grok-1",route:"/models/grok-1",locale:"de"},{kind:"MdxPage",name:"llama-3",route:"/models/llama-3",locale:"de"},{kind:"MdxPage",name:"llama",route:"/models/llama",locale:"de"},{kind:"MdxPage",name:"mistral-7b",route:"/models/mistral-7b",locale:"de"},{kind:"MdxPage",name:"mistral-large",route:"/models/mistral-large",locale:"de"},{kind:"MdxPage",name:"mixtral-8x22b",route:"/models/mixtral-8x22b",locale:"de"},{kind:"MdxPage",name:"mixtral",route:"/models/mixtral",locale:"de"},{kind:"MdxPage",name:"olmo",route:"/models/olmo",locale:"de"},{kind:"MdxPage",name:"phi-2",route:"/models/phi-2",locale:"de"},{kind:"MdxPage",name:"sora",route:"/models/sora",locale:"de"}]},{kind:"MdxPage",name:"models",route:"/models",locale:"de"},{kind:"MdxPage",name:"notebooks",route:"/notebooks",locale:"de"},{kind:"MdxPage",name:"papers",route:"/papers",locale:"de"},{kind:"Folder",name:"prompts",route:"/prompts",children:[{kind:"Meta",locale:"de",data:{classification:"Klassifizierung",coding:"Coding",creativity:"Kreativit\xe4t",evaluation:"Evaluation","information-extraction":"Informationsextraktion","image-generation":"Bildgenerierung",mathematics:"Mathematik  ","question-answering":"Fragebeantwortung",reasoning:"Schlussfolgerungen","text-summarization":"Textzusammenfassung",truthfulness:"Wahrhaftigkeit","adversarial-prompting":"Adversariales Prompting"}},{kind:"Folder",name:"adversarial-prompting",route:"/prompts/adversarial-prompting",children:[{kind:"Meta",locale:"de",data:{"prompt-injection":"Prompt Injection","prompt-leaking":"Prompt Leaking","jailbreaking-llms":"Jailbreaking"}},{kind:"MdxPage",name:"jailbreaking-llms",route:"/prompts/adversarial-prompting/jailbreaking-llms",locale:"de"},{kind:"MdxPage",name:"prompt-injection",route:"/prompts/adversarial-prompting/prompt-injection",locale:"de"},{kind:"MdxPage",name:"prompt-leaking",route:"/prompts/adversarial-prompting/prompt-leaking",locale:"de"}]},{kind:"MdxPage",name:"adversarial-prompting",route:"/prompts/adversarial-prompting",locale:"de"},{kind:"Folder",name:"classification",route:"/prompts/classification",children:[{kind:"Meta",locale:"de",data:{sentiment:"Sentimentklassifikation","sentiment-fewshot":"Few-Shot Sentimentklassifikation"}},{kind:"MdxPage",name:"sentiment-fewshot",route:"/prompts/classification/sentiment-fewshot",locale:"de"},{kind:"MdxPage",name:"sentiment",route:"/prompts/classification/sentiment",locale:"de"}]},{kind:"MdxPage",name:"classification",route:"/prompts/classification",locale:"de"},{kind:"Folder",name:"coding",route:"/prompts/coding",children:[{kind:"Meta",locale:"de",data:{"code-snippet":"Code-Snippets generieren","mysql-query":"Erzeugen von MySQL-Queries",tikz:"TiKZ-Diagramm 
zeichnen"}},{kind:"MdxPage",name:"code-snippet",route:"/prompts/coding/code-snippet",locale:"de"},{kind:"MdxPage",name:"mysql-query",route:"/prompts/coding/mysql-query",locale:"de"},{kind:"MdxPage",name:"tikz",route:"/prompts/coding/tikz",locale:"de"}]},{kind:"MdxPage",name:"coding",route:"/prompts/coding",locale:"de"},{kind:"Folder",name:"creativity",route:"/prompts/creativity",children:[{kind:"Meta",locale:"de",data:{rhymes:"Reime","infinite-primes":"Unendlichkeit der Primzahlen",interdisciplinary:"Interdisziplin\xe4re Aufgaben","new-words":"Erfindung neuer W\xf6rter"}},{kind:"MdxPage",name:"infinite-primes",route:"/prompts/creativity/infinite-primes",locale:"de"},{kind:"MdxPage",name:"interdisciplinary",route:"/prompts/creativity/interdisciplinary",locale:"de"},{kind:"MdxPage",name:"new-words",route:"/prompts/creativity/new-words",locale:"de"},{kind:"MdxPage",name:"rhymes",route:"/prompts/creativity/rhymes",locale:"de"}]},{kind:"MdxPage",name:"creativity",route:"/prompts/creativity",locale:"de"},{kind:"Folder",name:"evaluation",route:"/prompts/evaluation",children:[{kind:"Meta",locale:"de",data:{"plato-dialogue":"Platons Dialog bewerten"}},{kind:"MdxPage",name:"plato-dialogue",route:"/prompts/evaluation/plato-dialogue",locale:"de"}]},{kind:"MdxPage",name:"evaluation",route:"/prompts/evaluation",locale:"de"},{kind:"Folder",name:"image-generation",route:"/prompts/image-generation",children:[{kind:"Meta",locale:"de",data:{"alphabet-person":"Eine Person mit Alphabet-Buchstaben zeichnen"}},{kind:"MdxPage",name:"alphabet-person",route:"/prompts/image-generation/alphabet-person",locale:"de"}]},{kind:"MdxPage",name:"image-generation",route:"/prompts/image-generation",locale:"de"},{kind:"Folder",name:"information-extraction",route:"/prompts/information-extraction",children:[{kind:"Meta",locale:"de",data:{"extract-models":"Modellnamen extrahieren"}},{kind:"MdxPage",name:"extract-models",route:"/prompts/information-extraction/extract-models",locale:"de"}]},{kind:"MdxPage",name:"information-extraction",route:"/prompts/information-extraction",locale:"de"},{kind:"Folder",name:"mathematics",route:"/prompts/mathematics",children:[{kind:"Meta",locale:"de",data:{"composite-functions":"Auswertung zusammengesetzter Funktionen","odd-numbers":"Ungerade Zahlen addieren"}},{kind:"MdxPage",name:"composite-functions",route:"/prompts/mathematics/composite-functions",locale:"de"},{kind:"MdxPage",name:"odd-numbers",route:"/prompts/mathematics/odd-numbers",locale:"de"}]},{kind:"MdxPage",name:"mathematics",route:"/prompts/mathematics",locale:"de"},{kind:"Folder",name:"question-answering",route:"/prompts/question-answering",children:[{kind:"Meta",locale:"de",data:{"closed-domain":"Geschlossene Dom\xe4nen-Fragenbeantwortung","open-domain":"Offene Dom\xe4nen-Fragenbeantwortung","science-qa":"Wissenschaftliches Frage-Antworten"}},{kind:"MdxPage",name:"closed-domain",route:"/prompts/question-answering/closed-domain",locale:"de"},{kind:"MdxPage",name:"open-domain",route:"/prompts/question-answering/open-domain",locale:"de"},{kind:"MdxPage",name:"science-qa",route:"/prompts/question-answering/science-qa",locale:"de"}]},{kind:"MdxPage",name:"question-answering",route:"/prompts/question-answering",locale:"de"},{kind:"Folder",name:"reasoning",route:"/prompts/reasoning",children:[{kind:"Meta",locale:"de",data:{"indirect-reasoning":"Indirektes Reasoning","physical-reasoning":"Physisches 
Reasoning"}},{kind:"MdxPage",name:"indirect-reasoning",route:"/prompts/reasoning/indirect-reasoning",locale:"de"},{kind:"MdxPage",name:"physical-reasoning",route:"/prompts/reasoning/physical-reasoning",locale:"de"}]},{kind:"MdxPage",name:"reasoning",route:"/prompts/reasoning",locale:"de"},{kind:"Folder",name:"text-summarization",route:"/prompts/text-summarization",children:[{kind:"Meta",locale:"de",data:{"explain-concept":"Konzepte erkl\xe4ren"}},{kind:"MdxPage",name:"explain-concept",route:"/prompts/text-summarization/explain-concept",locale:"de"}]},{kind:"MdxPage",name:"text-summarization",route:"/prompts/text-summarization",locale:"de"},{kind:"Folder",name:"truthfulness",route:"/prompts/truthfulness",children:[{kind:"Meta",locale:"de",data:{"identify-hallucination":"Identifizieren von Halluzination"}},{kind:"MdxPage",name:"identify-hallucination",route:"/prompts/truthfulness/identify-hallucination",locale:"de"}]},{kind:"MdxPage",name:"truthfulness",route:"/prompts/truthfulness",locale:"de"}]},{kind:"MdxPage",name:"prompts",route:"/prompts",locale:"de"},{kind:"MdxPage",name:"readings",route:"/readings",locale:"de"},{kind:"Folder",name:"research",route:"/research",children:[{kind:"Meta",locale:"de",data:{"llm-agents":"LLM Agenten",rag:"RAG f\xfcr LLMs","llm-reasoning":"LLM Reasoning","guided-cot":"LM-gef\xfchrtes CoT",rag_hallucinations:"RAG Reduziert Halluzination",synthetic_data:"Synthetische Daten","rag-faithfulness":"RAG Zuverl\xe4ssigkeit","llm-recall":"LLM In-Context Recall",thoughtsculpt:"ThoughtSculpt","infini-attention":"Infini-Attention","trustworthiness-in-llms":"Vertrauensw\xfcrdigkeit in LLMs","llm-tokenization":"LLM Tokenisierung",groq:"Was ist Groq?"}},{kind:"MdxPage",name:"groq",route:"/research/groq",locale:"de"},{kind:"MdxPage",name:"guided-cot",route:"/research/guided-cot",locale:"de"},{kind:"MdxPage",name:"infini-attention",route:"/research/infini-attention",locale:"de"},{kind:"MdxPage",name:"llm-agents",route:"/research/llm-agents",locale:"de"},{kind:"MdxPage",name:"llm-reasoning",route:"/research/llm-reasoning",locale:"de"},{kind:"MdxPage",name:"llm-recall",route:"/research/llm-recall",locale:"de"},{kind:"MdxPage",name:"llm-tokenization",route:"/research/llm-tokenization",locale:"de"},{kind:"MdxPage",name:"rag-faithfulness",route:"/research/rag-faithfulness",locale:"de"},{kind:"MdxPage",name:"rag",route:"/research/rag",locale:"de"},{kind:"MdxPage",name:"rag_hallucinations",route:"/research/rag_hallucinations",locale:"de"},{kind:"MdxPage",name:"synthetic_data",route:"/research/synthetic_data",locale:"de"},{kind:"MdxPage",name:"thoughtsculpt",route:"/research/thoughtsculpt",locale:"de"},{kind:"MdxPage",name:"trustworthiness-in-llms",route:"/research/trustworthiness-in-llms",locale:"de"}]},{kind:"MdxPage",name:"research",route:"/research",locale:"de"},{kind:"Folder",name:"risks",route:"/risks",children:[{kind:"Meta",locale:"de",data:{adversarial:"Adversariales Prompting",factuality:"Faktentreue",biases:"Verzerrungen (biases)"}},{kind:"MdxPage",name:"adversarial",route:"/risks/adversarial",locale:"de"},{kind:"MdxPage",name:"biases",route:"/risks/biases",locale:"de"},{kind:"MdxPage",name:"factuality",route:"/risks/factuality",locale:"de"}]},{kind:"MdxPage",name:"risks",route:"/risks",locale:"de"},{kind:"MdxPage",name:"services",route:"/services",locale:"de"},{kind:"Folder",name:"techniques",route:"/techniques",children:[{kind:"Meta",locale:"de",data:{zeroshot:"Zero-Shot Prompting",fewshot:"Few-Shot Prompting",cot:"Chain-of-Thought 
Prompting",consistency:"Selbstkonsistenz",knowledge:"Generiertes Wissens-Prompting",prompt_chaining:"Prompt Chaining",tot:"Tree of Thoughts",rag:"Retrieval Augmented Generation",art:"Automatic Reasoning and Tool-use",ape:"Automatic Prompt Engineer",activeprompt:"Active-Prompt",dsp:"Directional Stimulus Prompting",pal:"Program-Aided Language Models",react:"ReAct",reflexion:"Reflexion",multimodalcot:"Multimodal CoT",graph:"Graph-Prompting"}},{kind:"MdxPage",name:"activeprompt",route:"/techniques/activeprompt",locale:"de"},{kind:"MdxPage",name:"ape",route:"/techniques/ape",locale:"de"},{kind:"MdxPage",name:"art",route:"/techniques/art",locale:"de"},{kind:"MdxPage",name:"consistency",route:"/techniques/consistency",locale:"de"},{kind:"MdxPage",name:"cot",route:"/techniques/cot",locale:"de"},{kind:"MdxPage",name:"dsp",route:"/techniques/dsp",locale:"de"},{kind:"MdxPage",name:"fewshot",route:"/techniques/fewshot",locale:"de"},{kind:"MdxPage",name:"graph",route:"/techniques/graph",locale:"de"},{kind:"MdxPage",name:"knowledge",route:"/techniques/knowledge",locale:"de"},{kind:"MdxPage",name:"multimodalcot",route:"/techniques/multimodalcot",locale:"de"},{kind:"MdxPage",name:"pal",route:"/techniques/pal",locale:"de"},{kind:"MdxPage",name:"prompt_chaining",route:"/techniques/prompt_chaining",locale:"de"},{kind:"MdxPage",name:"rag",route:"/techniques/rag",locale:"de"},{kind:"MdxPage",name:"react",route:"/techniques/react",locale:"de"},{kind:"MdxPage",name:"reflexion",route:"/techniques/reflexion",locale:"de"},{kind:"MdxPage",name:"tot",route:"/techniques/tot",locale:"de"},{kind:"MdxPage",name:"zeroshot",route:"/techniques/zeroshot",locale:"de"},{kind:"MdxPage",name:"meta-prompting",route:"/techniques/meta-prompting",locale:"en"}]},{kind:"MdxPage",name:"techniques",route:"/techniques",locale:"de"},{kind:"MdxPage",name:"tools",route:"/tools",locale:"de"}],flexsearch:{codeblocks:!0},title:"Papers",headings:l},pageNextRoute:"/papers.de",nextraLayout:s.ZP,themeConfig:t.Z};r.default=(0,a.j)(h)},52243:function(e,r,i){"use strict";i.d(r,{Z:function(){return l}});var n=i(11527),a=i(50959),s=i(51592),t=i(86259);function WordWrapIcon(e){return(0,n.jsx)("svg",{viewBox:"0 0 24 24",width:"24",height:"24",...e,children:(0,n.jsx)("path",{fill:"currentColor",d:"M4 19h6v-2H4v2zM20 5H4v2h16V5zm-3 6H4v2h13.25c1.1 0 2 .9 2 2s-.9 2-2 2H15v-2l-3 3l3 3v-2h2c2.21 0 4-1.79 4-4s-1.79-4-4-4z"})})}let Button=e=>{let{children:r,className:i,...a}=e;return(0,n.jsx)("button",{className:(0,t.Z)("nextra-button nx-transition-all active:nx-opacity-50","nx-bg-primary-700/5 nx-border nx-border-black/5 nx-text-gray-600 hover:nx-text-gray-900 nx-rounded-md nx-p-1.5","dark:nx-bg-primary-300/10 dark:nx-border-white/10 dark:nx-text-gray-400 dark:hover:nx-text-gray-50",i),...a,children:r})};function CheckIcon(e){return(0,n.jsx)("svg",{viewBox:"0 0 20 20",width:"1em",height:"1em",fill:"currentColor",...e,children:(0,n.jsx)("path",{fillRule:"evenodd",d:"M16.707 5.293a1 1 0 010 1.414l-8 8a1 1 0 01-1.414 0l-4-4a1 1 0 011.414-1.414L8 12.586l7.293-7.293a1 1 0 011.414 0z",clipRule:"evenodd"})})}function CopyIcon(e){return(0,n.jsxs)("svg",{width:"24",height:"24",viewBox:"0 0 24 24",fill:"none",xmlns:"http://www.w3.org/2000/svg",stroke:"currentColor",...e,children:[(0,n.jsx)("rect",{x:"9",y:"9",width:"13",height:"13",rx:"2",strokeWidth:"2",strokeLinecap:"round",strokeLinejoin:"round"}),(0,n.jsx)("path",{d:"M5 15H4C2.89543 15 2 14.1046 2 13V4C2 2.89543 2.89543 2 4 2H13C14.1046 2 15 2.89543 15 
4V5",strokeWidth:"2",strokeLinecap:"round",strokeLinejoin:"round"})]})}let CopyToClipboard=e=>{let{getValue:r,...i}=e,[s,t]=(0,a.useState)(!1);(0,a.useEffect)(()=>{if(!s)return;let e=setTimeout(()=>{t(!1)},2e3);return()=>{clearTimeout(e)}},[s]);let o=(0,a.useCallback)(async()=>{var e;t(!0),(null===(e=navigator)||void 0===e?void 0:e.clipboard)||console.error("Access to clipboard rejected!");try{await navigator.clipboard.writeText(r())}catch(e){console.error("Failed to copy!")}},[r]),l=s?CheckIcon:CopyIcon;return(0,n.jsx)(Button,{onClick:o,title:"Copy code",tabIndex:0,...i,children:(0,n.jsx)(l,{className:"nextra-copy-icon nx-pointer-events-none nx-h-4 nx-w-4"})})},o={logo:(0,n.jsxs)(n.Fragment,{children:[(0,n.jsxs)("svg",{xmlns:"http://www.w3.org/2000/svg",width:"24",height:"24",viewBox:"0 0 206 246",fill:"none",children:[(0,n.jsx)("circle",{cx:"40",cy:"40",r:"40",fill:"currentColor"}),(0,n.jsx)("circle",{cx:"40",cy:"206",r:"40",fill:"currentColor"}),(0,n.jsx)("circle",{cx:"166",cy:"120",r:"40",fill:"currentColor"})]}),(0,n.jsx)("span",{style:{marginLeft:".4em",fontWeight:800},children:"Prompt Engineering Guide"})]}),i18n:[{locale:"en",text:"English"},{locale:"zh",text:"中文"},{locale:"jp",text:"日本語"},{locale:"pt",text:"Portugu\xeas"},{locale:"it",text:"Italian"},{locale:"tr",text:"T\xfcrk\xe7e"},{locale:"es",text:"Espa\xf1ol"},{locale:"fr",text:"Fran\xe7ais"},{locale:"kr",text:"한국어"},{locale:"ca",text:"Catal\xe0"},{locale:"fi",text:"Finnish"},{locale:"ru",text:"Русский"},{locale:"de",text:"Deutsch"},{locale:"ar",text:"العربية"}],head:function(){let{title:e}=(0,s.ZR)();return(0,n.jsxs)(n.Fragment,{children:[(0,n.jsxs)("title",{children:[e?e+" | Prompt Engineering Guide":"Prompt Engineering Guide"," "]}),(0,n.jsx)("meta",{name:"viewport",content:"width=device-width, initial-scale=1.0"}),(0,n.jsx)("meta",{property:"og:title",content:"Prompt Engineering Guide"}),(0,n.jsx)("meta",{property:"og:description",content:"A Comprehensive Overview of Prompt Engineering"}),(0,n.jsx)("meta",{name:"og:title",content:e?e+" | Prompt Engineering Guide":"Prompt Engineering Guide"}),(0,n.jsx)("link",{rel:"icon",href:"/144-favicon.svg",type:"image/svg+xml"}),(0,n.jsx)("link",{rel:"icon",href:"/144-favicon-dark.svg",type:"image/svg+xml",media:"(prefers-color-scheme: dark)"})]})},project:{link:"https://github.com/dair-ai/Prompt-Engineering-Guide"},chat:{link:"https://discord.gg/FUyz9vPAwf"},docsRepositoryBase:"https://github.com/dair-ai/Prompt-Engineering-Guide/tree/main/",footer:{text:"Copyright \xa9 2024 DAIR.AI"},search:{placeholder:"Search..."},components:{pre:e=>{let{children:r,className:i,hasCopyCode:s=!0,filename:o,...l}=e,h=(0,a.useRef)(null),d=(0,a.useCallback)(()=>{let e=document.documentElement.dataset,r="nextraWordWrap"in e;r?delete e.nextraWordWrap:e.nextraWordWrap=""},[]);return(0,n.jsxs)("div",{className:"nextra-code-block nx-relative nx-mt-6 first:nx-mt-0",children:[o&&(0,n.jsx)("div",{className:"nx-absolute nx-top-0 nx-z-[1] nx-w-full nx-truncate nx-rounded-t-xl nx-bg-primary-700/5 nx-py-2 nx-px-4 nx-text-xs nx-text-gray-700 dark:nx-bg-primary-300/10 dark:nx-text-gray-200",children:o}),(0,n.jsx)("pre",{className:(0,t.Z)("nx-bg-primary-700/5 nx-mb-4 nx-overflow-x-auto nx-rounded-xl nx-subpixel-antialiased dark:nx-bg-primary-300/10 nx-text-[.9em]","contrast-more:nx-border contrast-more:nx-border-primary-900/20 contrast-more:nx-contrast-150 contrast-more:dark:nx-border-primary-100/40",o?"nx-pt-12 
nx-pb-4":"nx-py-4",i),ref:h,...l,children:a.isValidElement(r)&&"code"===r.type?r.props.children:r}),(0,n.jsxs)("div",{className:(0,t.Z)("nx-opacity-0 nx-transition [div:hover>&]:nx-opacity-100 focus-within:nx-opacity-100","nx-flex nx-gap-1 nx-absolute nx-m-[11px] nx-right-0",o?"nx-top-8":"nx-top-0"),children:[(0,n.jsx)(Button,{onClick:d,className:"md:nx-hidden",title:"Toggle word wrap elvis",children:(0,n.jsx)(WordWrapIcon,{className:"nx-pointer-events-none nx-h-4 nx-w-4"})}),s&&(0,n.jsx)(CopyToClipboard,{getValue:()=>{var e,r;return(null===(r=h.current)||void 0===r?void 0:null===(e=r.querySelector("code"))||void 0===e?void 0:e.textContent)||""}})]})]})}}};var l=o}},function(e){e.O(0,[47262,49774,92888,40179],function(){return e(e.s=83313)}),_N_E=e.O()}]);