Need some help converting charts to run locally (waiting for my LangGraph Cloud beta!)
deanchanter opened this issue · 1 comment
deanchanter commented
@bracesproul Please help
I'm getting what I think is a good trace in LangSmith (I can see all the outputs from the agents), but I keep getting this error.
Here's what I have done so far...
New `server.py`:

```python
import uvicorn
from dotenv import load_dotenv
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from langserve import add_routes

from gen_ui_backend.charts.chain import create_graph
from gen_ui_backend.charts.schema import ChatInputType, DataDisplayTypeAndDescription

# Load environment variables from .env file
load_dotenv()


def start() -> None:
    app = FastAPI(
        title="Gen UI Backend",
        version="1.0",
        description="A simple api server using Langchain's Runnable interfaces",
    )

    # Configure CORS
    origins = [
        "http://localhost",
        "http://localhost:3000",
    ]
    app.add_middleware(
        CORSMiddleware,
        allow_origins=origins,
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    graph = create_graph()
    runnable = graph.with_types(input_type=dict, output_type=dict)
    add_routes(app, runnable, path="/charts", playground_type="default")

    print("Starting server...")
    uvicorn.run(app, host="0.0.0.0", port=8000)
```
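To sanity-check the route before touching the frontend, something like this works from a quick script (a sketch — the URL and payload shape just mirror the server code above):

```tsx
import { RemoteRunnable } from "@langchain/core/runnables/remote";

// Quick smoke test (sketch): URL and payload mirror the LangServe setup above
const chain = new RemoteRunnable({
  url: "http://localhost:8000/charts",
});

const result = await chain.invoke({
  input: { content: "Show me total orders by month" },
});
console.log(result);
```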
Updates to `agent.tsx` to use FastAPI:
```tsx
async function filterGraph(inputs: FilterGraphInput) {
  "use server";
  const remoteRunnable = new RemoteRunnable({
    url: API_URL,
  });

  /*const client = new Client({
    apiUrl: process.env.LANGGRAPH_CLOUD_API_URL,
    defaultHeaders: {
      "X-API-KEY": process.env.LANGGRAPH_CLOUD_API_KEY,
    },
  });

  const assistants = await client.assistants.search({
    metadata: null,
    offset: 0,
    limit: 1,
  });*/
  // We don't do any persisting, so we can just grab the first assistant
  //const agent = assistants[0];

  /*const streamEventsRunnable = RunnableLambda.from(async function* (
    input: FilterGraphRunnableInput,
  ) {
    const streamResponse = await remoteRunnable.stream({
    });
    for await (const event of streamResponse) {
      yield streamevent.data;
    }
  }).withConfig({ runName: LAMBDA_STREAM_WRAPPER_NAME });*/

  let displayFormat = "";
  let chartType: ChartType;

  const eventHandlerOne = (
    streamEvent: StreamEvent,
    fields: EventHandlerFields,
  ) => {
    const langGraphEvent: StreamEvent = streamEvent.data.chunk;
    if (!langGraphEvent) {
      return;
    }
    const { event, name, data } = langGraphEvent;
    if (event !== "on_chain_end") {
      return;
    }
    if (name === "generate_filters") {
      const { selected_filters }: { selected_filters: Partial<Filter> } =
        data.output;
      return handleSelectedFilters(selected_filters, fields.ui);
    } else if (name === "generate_chart_type") {
      chartType = data.output.chart_type;
      return handleChartType(chartType, fields.ui);
    } else if (name === "generate_data_display_format") {
      displayFormat = data.output.display_format;
      return handleDisplayFormat(displayFormat, chartType, fields.ui);
    } else if (name === "filter_data") {
      const { orders } = data.output;
      if (!chartType || !displayFormat) {
        throw new Error(
          "Chart type and display format must be set before filtering data",
        );
      }
      return handleConstructingCharts(
        {
          orders,
          chartType,
          displayFormat,
        },
        fields.ui,
      );
    }
  };

  const processedInputs = {
    ...inputs,
    input: {
      content: inputs.input,
    },
  };

  return streamRunnableUI(remoteRunnable, processedInputs, {
    eventHandlers: [eventHandlerOne],
  });
}

export const EndpointsContext = exposeEndpoints({ filterGraph });
```
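For context, the `FilterGraphInput` type isn't shown above; based on how `processedInputs` is built, it's assumed to look roughly like this (not the actual definition):

```tsx
// Assumed shape, inferred from processedInputs above (hypothetical)
interface FilterGraphInput {
  input: string; // raw user message; wrapped as { content: ... } before invoking the graph
}
```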
deanchanter commented
I figured it out: I need v2 `streamEvents`!
```tsx
const streamEventsRunnable = RunnableLambda.from(async function* (
  input: FilterGraphRunnableInput,
) {
  const streamResponse = await remoteRunnable.streamEvents(input, {
    version: "v2",
  });
  for await (const event of streamResponse) {
    yield event;
  }
}).withConfig({ runName: LAMBDA_STREAM_WRAPPER_NAME });
```
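In case it helps anyone else: with `version: "v2"`, each event comes back in the standard `on_*` schema, so the objects the lambda yields (and that `eventHandlerOne` unwraps via `streamEvent.data.chunk`) carry top-level `event`, `name`, and `data` fields. Roughly what one looks like (example values, not actual output):

```tsx
// Illustrative v2 event as consumed by eventHandlerOne (values are made up)
const exampleEvent = {
  event: "on_chain_end",
  name: "generate_chart_type",
  data: { output: { chart_type: "bar" } },
};
```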