mpetazzoni/sse.js

Not working with OpenAI Assistant streaming.

Closed this issue · 2 comments

Hi,

I am using this library on the client side to handle the OpenAI Assistant streaming response.

The OpenAI Assistant API streams the response in the following format:

event: thread.run.created
data: {"id":"run_Z9ZUOaPGw9gmE2mcturVbSMG","object":"thread.run","created_at":1714453013,"assistant_id":"asst_OeYfnXPdsIYqty1pQ4f1iska","thread_id":"thread_cuztRnV2RwWIIe9OtqKWGDCq","status":"queued","started_at":null,"expires_at":1714453613,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":"You are expert on company policy. Use your knowledge base to answer questions about the company policy.","tools":[{"type":"file_search"}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}

event: thread.run.queued
data: {"id":"run_Z9ZUOaPGw9gmE2mcturVbSMG","object":"thread.run","created_at":1714453013,"assistant_id":"asst_OeYfnXPdsIYqty1pQ4f1iska","thread_id":"thread_cuztRnV2RwWIIe9OtqKWGDCq","status":"queued","started_at":null,"expires_at":1714453613,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":"You are expert on company policy. Use your knowledge base to answer questions about the company policy.","tools":[{"type":"file_search"}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}

event: thread.run.in_progress
data: {"id":"run_Z9ZUOaPGw9gmE2mcturVbSMG","object":"thread.run","created_at":1714453013,"assistant_id":"asst_OeYfnXPdsIYqty1pQ4f1iska","thread_id":"thread_cuztRnV2RwWIIe9OtqKWGDCq","status":"in_progress","started_at":1714453013,"expires_at":1714453613,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":"You are expert on company policy. Use your knowledge base to answer questions about the company policy.","tools":[{"type":"file_search"}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}

event: thread.run.step.created
data: {"id":"step_Qb2hadvSzoW5bBUuXs71k4nc","object":"thread.run.step","created_at":1714453015,"run_id":"run_Z9ZUOaPGw9gmE2mcturVbSMG","assistant_id":"asst_OeYfnXPdsIYqty1pQ4f1iska","thread_id":"thread_cuztRnV2RwWIIe9OtqKWGDCq","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1714453613,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_9gkaOV7brzvGL8sxINHtL0W0"}},"usage":null}

event: thread.run.step.in_progress
data: {"id":"step_Qb2hadvSzoW5bBUuXs71k4nc","object":"thread.run.step","created_at":1714453015,"run_id":"run_Z9ZUOaPGw9gmE2mcturVbSMG","assistant_id":"asst_OeYfnXPdsIYqty1pQ4f1iska","thread_id":"thread_cuztRnV2RwWIIe9OtqKWGDCq","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1714453613,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_9gkaOV7brzvGL8sxINHtL0W0"}},"usage":null}

The SSE library does not capture any of this response, and no error is displayed.

When I connect with SSE, the readyState is initially 1 (open), but after some time it transitions to 2, indicating that the connection has closed.

I have already verified that my server API is streaming the response properly.
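For reference, a quick way to double-check the raw stream from the browser without sse.js (a minimal sketch; <base_url_endpoint> and payloaddata are the same placeholders used in the snippet below):

(async () => {
  // Read the raw SSE bytes with fetch to confirm the server is streaming.
  const response = await fetch('<base_url_endpoint>', {
    method: 'POST',
    headers: { 'Content-Type': 'text/plain' },
    body: payloaddata
  })
  const reader = response.body.getReader()
  const decoder = new TextDecoder()
  for (;;) {
    const { done, value } = await reader.read()
    if (done) break
    // Each chunk should show raw "event:" / "data:" lines as they arrive.
    console.log(decoder.decode(value, { stream: true }))
  }
})()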

Here is how I have set up the SSE library:

var source = new SSE('<base_url_endpoint>', {
  headers: { 'Content-Type': 'text/plain' },
  payload: payloaddata,
  method: 'POST'
})

source.addEventListener('status', function (e) {
  console.log('System status is now: ' + e.data)
})

const handleReadyStateChange = e => {
  console.log('state', e.readyState)
  // can't see the readyState change from 0 to 1 here
}

source.addEventListener('readystatechange', handleReadyStateChange)

source.addEventListener('message', function (e) {
  console.log('e', e)
  // Assuming we receive JSON-encoded data payloads:
  var payload = JSON.parse(e.data)
  console.log(payload)
})

source.stream()

Can anyone help me figure out why it's not working?

It works perfectly when I capture the OpenAI chat completion streaming response.

The OpenAI chat completion stream has the following format:

data: {"id":"chatcmpl-9JZqsVweBxcfeYlMMvsVDu3Sa5o8z","object":"chat.completion.chunk","created":1714453074,"model":"gpt-3.5-turbo-1106","system_fingerprint":"fp_b953e4de39","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-9JZqsVweBxcfeYlMMvsVDu3Sa5o8z","object":"chat.completion.chunk","created":1714453074,"model":"gpt-3.5-turbo-1106","system_fingerprint":"fp_b953e4de39","choices":[{"index":0,"delta":{"content":"The"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-9JZqsVweBxcfeYlMMvsVDu3Sa5o8z","object":"chat.completion.chunk","created":1714453074,"model":"gpt-3.5-turbo-1106","system_fingerprint":"fp_b953e4de39","choices":[{"index":0,"delta":{"content":" data"},"logprobs":null,"finish_reason":null}]}

So I am guessing it's related to the difference in stream format.

************ After some more digging ************

It looks like I need to subscribe to the custom event types in the library, but how do I do that for the OpenAI Assistant streaming format above?

@tv-ankur You need to add event listeners for the events you care about. Does OpenAI document the custom event types they use in their stream response?
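For example, something along these lines (just a sketch, using one of the event names from your dump above) should fire for each named event:

source.addEventListener('thread.run.created', function (e) {
  // e.data is the JSON that follows the "event: thread.run.created" line
  console.log('run created:', JSON.parse(e.data).id)
})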

@mpetazzoni thanks a lot for looking into it.

If anyone is using this library to receive the OpenAI Assistant streaming response, you can subscribe to these events:

source.addEventListener('thread.message.delta', function (e) {
  // Fires for each streamed chunk of the assistant's message.
  const payload = JSON.parse(e.data)
  console.log('delta', payload.delta.content[0].text.value)
})

source.addEventListener('thread.message.completed', function (e) {
  // Fires once with the complete response in one go.
  const completeMessage = JSON.parse(e.data)
  console.log(completeMessage.content[0].text.value)
})

Here is the complete list of events: https://platform.openai.com/docs/api-reference/assistants-streaming/events
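If you also want to build up the full text as it streams in, here is a rough sketch that accumulates the deltas (the event names come from the linked list, and the payload shape matches the delta example above):

let assistantReply = ''

source.addEventListener('thread.message.delta', function (e) {
  // Append each streamed text fragment to the running reply.
  const payload = JSON.parse(e.data)
  assistantReply += payload.delta.content[0].text.value
})

source.addEventListener('thread.run.completed', function (e) {
  // The run is finished; assistantReply now holds the whole message.
  console.log('final reply:', assistantReply)
})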