20 changes: 17 additions & 3 deletions browser_use/llm/vercel/chat.py
@@ -189,6 +189,8 @@ class ChatVercel(BaseChatModel):
 			prompt-based JSON extraction. Auto-detects common reasoning models by default.
 		timeout: Request timeout in seconds
 		max_retries: Maximum number of retries for failed requests
+		provider_options: Provider routing options for the gateway. Use this to control which
+			providers are used and in what order. Example: {'gateway': {'order': ['vertex', 'anthropic']}}
 	"""

 	# Model configuration
@@ -218,6 +220,7 @@ class ChatVercel(BaseChatModel):
 	default_query: Mapping[str, object] | None = None
 	http_client: httpx.AsyncClient | None = None
 	_strict_response_validation: bool = False
+	provider_options: dict[str, Any] | None = None

 	# Static
 	@property
@@ -382,6 +385,8 @@ async def ainvoke(
 			model_params['max_tokens'] = self.max_tokens
 		if self.top_p is not None:
 			model_params['top_p'] = self.top_p
+		if self.provider_options:
+			model_params['extra_body'] = {'providerOptions': self.provider_options}

 		if output_format is None:
 			# Return string response
@@ -400,11 +405,12 @@ async def ainvoke(

 		else:
 			is_google_model = self.model.startswith('google/')
+			is_anthropic_model = self.model.startswith('anthropic/')
 			is_reasoning_model = self.reasoning_models and any(
 				str(pattern).lower() in str(self.model).lower() for pattern in self.reasoning_models
 			)

-			if is_google_model or is_reasoning_model:
+			if is_google_model or is_anthropic_model or is_reasoning_model:
 				modified_messages = [m.model_copy(deep=True) for m in messages]

 				schema = SchemaOptimizer.create_gemini_optimized_schema(output_format)
@@ -431,10 +437,14 @@ async def ainvoke(

 				vercel_messages = VercelMessageSerializer.serialize_messages(modified_messages)

+				request_params = model_params.copy()
+				if self.provider_options:
+					request_params['extra_body'] = {'providerOptions': self.provider_options}
+
 				response = await self.get_client().chat.completions.create(
 					model=self.model,
 					messages=vercel_messages,
-					**model_params,
+					**request_params,
 				)

 				content = response.choices[0].message.content if response.choices else None
@@ -479,14 +489,18 @@ async def ainvoke(
 					'schema': schema,
 				}

+				request_params = model_params.copy()
+				if self.provider_options:
+					request_params['extra_body'] = {'providerOptions': self.provider_options}
+
 				response = await self.get_client().chat.completions.create(
 					model=self.model,
 					messages=vercel_messages,
 					response_format=ResponseFormatJSONSchema(
 						json_schema=response_format_schema,
 						type='json_schema',
 					),
-					**model_params,
+					**request_params,
 				)

 				content = response.choices[0].message.content if response.choices else None
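A note on what the `extra_body` wiring above actually sends: the OpenAI-compatible client treats `extra_body` as additional JSON properties merged into the top level of the request body, so the gateway sees a `providerOptions` field alongside `model` and `messages`. A minimal sketch of that merge; the model name and message below are placeholders, not values from this diff:

```python
import json

# Illustrative params, mirroring how ChatVercel.ainvoke builds request_params.
model_params = {'temperature': 0.0, 'top_p': 1.0}
provider_options = {'gateway': {'order': ['vertex', 'anthropic']}}

request_params = model_params.copy()
request_params['extra_body'] = {'providerOptions': provider_options}

# The OpenAI Python client merges extra_body keys into the top-level JSON
# request body, so the gateway receives roughly this payload:
extra = request_params.pop('extra_body')
payload = {
	'model': 'anthropic/claude-sonnet-4',
	'messages': [{'role': 'user', 'content': 'Hello'}],
	**request_params,
	**extra,
}
print(json.dumps(payload, indent=2))
```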
14 changes: 13 additions & 1 deletion docs/supported-models.mdx
@@ -358,12 +358,24 @@ api_key = os.getenv('VERCEL_API_KEY')
 if not api_key:
 	raise ValueError('VERCEL_API_KEY is not set')

-# Use Vercel AI Gateway
+# Basic usage
 llm = ChatVercel(
 	model='openai/gpt-4o',
 	api_key=api_key,
 )

+# With provider options - control which providers are used and in what order
+# This will try Vertex AI first, then fall back to Anthropic if Vertex fails
+llm_with_provider_options = ChatVercel(
+	model='anthropic/claude-sonnet-4',
+	api_key=api_key,
+	provider_options={
+		'gateway': {
+			'order': ['vertex', 'anthropic']  # Try Vertex AI first, then Anthropic
+		}
+	},
+)
+
 agent = Agent(
 	task="Your task here",
 	llm=llm
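Beyond `order`, Vercel AI Gateway's provider options also document an `only` list that restricts routing to the named providers with no fallback; treat the exact key name as an assumption to check against the current gateway docs. Under that assumption, pinning the docs example above to a single provider would look like this:

```python
# Sketch: pin routing to one provider, reusing ChatVercel and api_key from the
# example above. The 'only' key is taken from Vercel AI Gateway's provider
# options; verify it against current documentation before relying on it.
llm_vertex_only = ChatVercel(
	model='anthropic/claude-sonnet-4',
	api_key=api_key,
	provider_options={
		'gateway': {
			'only': ['vertex'],  # no fallback to other providers
		}
	},
)
```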
19 changes: 19 additions & 0 deletions examples/models/vercel_ai_gateway.py
@@ -24,19 +24,38 @@
 if not api_key:
 	raise ValueError('VERCEL_API_KEY is not set')

+# Basic usage
 llm = ChatVercel(
 	model='openai/gpt-4o',
 	api_key=api_key,
 )

+# Example with provider options - control which providers are used and in what order
+# This will try Vertex AI first, then fall back to Anthropic if Vertex fails
+llm_with_provider_options = ChatVercel(
+	model='anthropic/claude-sonnet-4',
+	api_key=api_key,
+	provider_options={
+		'gateway': {
+			'order': ['vertex', 'anthropic']  # Try Vertex AI first, then Anthropic
+		}
+	},
+)
+
 agent = Agent(
 	task='Go to example.com and summarize the main content',
 	llm=llm,
 )

+agent_with_provider_options = Agent(
+	task='Go to example.com and summarize the main content',
+	llm=llm_with_provider_options,
+)
+

 async def main():
 	await agent.run(max_steps=10)
+	await agent_with_provider_options.run(max_steps=10)


 if __name__ == '__main__':
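A usage note on the example file: `main()` awaits the two runs one after the other. To compare the two routing configurations side by side, the runs can also be dispatched concurrently; a sketch, assuming the two agents are fully independent (each holds its own `ChatVercel` instance, so one run's gateway routing cannot affect the other):

```python
import asyncio

async def main_concurrent():
	# Both agents run at once; max_steps mirrors the sequential example.
	await asyncio.gather(
		agent.run(max_steps=10),
		agent_with_provider_options.run(max_steps=10),
	)

if __name__ == '__main__':
	asyncio.run(main_concurrent())
```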