# Output Fixing Parser
import os

from dotenv import load_dotenv
from pydantic import BaseModel
# NOTE(review): OutputFixingParser is exported by the `langchain` package
# (langchain.output_parsers), not `langchain_core.output_parsers` — the
# original import path raises ImportError.
from langchain.output_parsers import OutputFixingParser
from langchain_core.output_parsers import PydanticOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI

# Load environment variables from a local .env file so the API key is
# never hard-coded in the script.
load_dotenv()
api_key = os.getenv("GEMINI_API_KEY")

# Gemini chat model; used both for the main query and by the fixing
# parser when it re-submits malformed output for correction.
llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash", api_key=api_key)
# Step 1: Define structured output model
class PersonInfo(BaseModel):
    """Structured schema the parsed LLM output must conform to."""

    # Person's full name as returned by the model.
    name: str
    # Age in whole years; pydantic validates/coerces the value to int.
    age: int
# Steps 2 & 3: Build the base Pydantic parser, then wrap it in an
# OutputFixingParser so malformed model output is sent back to the LLM
# for repair instead of raising a validation error immediately.
pydantic_parser = PydanticOutputParser(pydantic_object=PersonInfo)
fixing_parser = OutputFixingParser.from_llm(parser=pydantic_parser, llm=llm)
# Step 4: Prompt — the expected JSON shape is spelled out inline; the
# doubled braces escape PromptTemplate's {variable} placeholder syntax.
prompt = PromptTemplate(
    template=(
        "Provide Elon Musk's name and age in JSON format like "
        "{{'name':'Elon Musk','age':51}}."
    ),
    input_variables=[],
)

# Step 5: Run the LLM, then parse (and auto-repair, if needed) its output.
response = llm.invoke(prompt.format())
parsed_output = fixing_parser.parse(response.content)
print(parsed_output)
# Parsed result is a PersonInfo instance, so fields are attributes.
print(parsed_output.name, parsed_output.age)