MULTI-ON/cookbook

Getting an internal server error whenever I try to connect to MultiOn

Closed this issue · 0 comments

code:
# --- Imports & environment setup -------------------------------------------
# Standard library
import base64
import os
from io import BytesIO
from typing import Optional

# Third-party: MultiOn browsing client
import multion
from multion.client import MultiOn

# Third-party: LangChain agent machinery
from langchain.llms import OpenAI
from langchain.agents import AgentType, initialize_agent, Tool
from langchain.chat_models import ChatOpenAI
from langchain.tools import StructuredTool

# Local helper providing a human-in-the-loop tool
from human import HumanInputRun

os.environ["LANGCHAIN_TRACING"] = "true"
# NOTE(review): never commit real secrets — prefer exporting OPENAI_API_KEY in
# the shell; setdefault keeps an already-exported key intact.
os.environ.setdefault("OPENAI_API_KEY", "sk-***************")

# BUG FIX: the original file contained the dangling attribute access
# `multion.CreateSessionInputBrowserParams.` (trailing dot) — a SyntaxError —
# and called `multion.login()` at import time before any client existed.
# Both were removed; authentication happens via MultiOn(api_key=...) inside
# agent() below.

def agent(query: str) -> str:
    """Run a structured-chat LangChain agent that can browse the web via MultiOn.

    Parameters
    ----------
    query : str
        Natural-language task for the agent to carry out.

    Returns
    -------
    str
        The agent's final answer.
    """
    # TODO(review): load the key from the environment instead of hard-coding it.
    multion_toolkit = MultiOn(api_key="50***************")

    def browse(cmd: str) -> str:
        """Perform one browsing step for the given natural-language command."""
        return multion_toolkit.browse(cmd=cmd)

    # BUG FIX: StructuredTool.from_function expects a *callable*. The original
    # code passed `multion_toolkit.browse(cmd="search flights in google")` —
    # i.e. the RESPONSE of a single browse call — which is not callable and
    # breaks the tool at agent time.
    tool = StructuredTool.from_function(
        func=browse,
        name="multion_browse",
        description="Performs a web-browsing operation based on the given command.",
    )
    human_input = HumanInputRun()

    llm = OpenAI(temperature=0)

    # Structured tools are only compatible with the
    # STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION agent type.
    agent_executor = initialize_agent(
        [tool, human_input],
        llm,
        agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
    )

    return agent_executor.run(query)

# Task prompt handed verbatim to agent(). FIX: dropped the needless `f` prefix
# (the string has no placeholders, and a stray brace added later would raise).
PROMPT = """
You are an expert AI Agent whose job is to find and get a gpu on a given service HYPERSTACK (https://www.hyperstack.cloud). Pass in the full detailed instructions exactly as below to MultiOn for browsing.

Here are the high-level steps:
1. Login and go to reach the gpu selection page
2. Click on the Load Provising profile and then select the multion-profile and load it
3. After the profile has been correctly selected and loaded continue
4. If an on-demand A100 GPU is available with 80GB RAM, then reserve it
5. If reservation is successful, then enable ssh access and return public ip to access the machine

"""

# Guard the entry point so importing this module doesn't fire the agent.
if __name__ == "__main__":
    agent(query=PROMPT)

error:

typing.cast(InternalServerErrorResponse, construct_type(type_=InternalServerErrorResponse, object_=response.json())) # type: ignore
File "C:\Users\tfd250\AppData\Local\miniconda3\envs\albert\lib\site-packages\httpx\_models.py", line 764, in json
return jsonlib.loads(self.content, **kwargs)
File "C:\Users\tfd250\AppData\Local\miniconda3\envs\albert\lib\json\__init__.py", line 346, in loads
return _default_decoder.decode(s)
File "C:\Users\tfd250\AppData\Local\miniconda3\envs\albert\lib\json\decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "C:\Users\tfd250\AppData\Local\miniconda3\envs\albert\lib\json\decoder.py", line 355, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)

please help

or do a video on using multion with langchain.

I am trying to build an assistant that can book movie tickets, flight tickets — everything — automatically, and also chat with the user.

I am trying to implement this:

https://github.com/MULTI-ON/api/tree/main/examples/gpu_agent