File tree

5 files changed

+376
-27
lines changed

5 files changed

+376
-27
lines changed
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,25 @@
2828
generative_models as preview_generative_models,
2929
)
3030

31+
32+
# A dummy function for function calling
def get_current_weather(location: str, unit: str = "centigrade"):
    """Gets weather in the specified location.

    Args:
        location: The location for which to get the weather.
        unit: Optional. Temperature unit. Can be Centigrade or Fahrenheit. Defaults to Centigrade.

    Returns:
        The weather information as a dict.
    """
    # Canned response: the tests only check that this payload flows back
    # through the automatic function-calling round trip.
    return {
        "location": location,
        "unit": unit,
        "weather": "Super nice, but maybe a bit hot.",
    }
48+
49+
3150
_REQUEST_FUNCTION_PARAMETER_SCHEMA_STRUCT = {
3251
"type": "object",
3352
"properties": {
@@ -320,3 +339,34 @@ def test_generate_content_function_calling(self):
320339
summary = response.candidates[0].content.parts[0].text
321340

322341
assert summary
342+
343+
def test_chat_automatic_function_calling(self):
    # Infer the function-calling schema directly from the Python function.
    weather_func = generative_models.FunctionDeclaration.from_func(
        get_current_weather
    )
    weather_tool = generative_models.Tool(function_declarations=[weather_func])

    # Specifying the tools once to avoid specifying them in every request.
    model = preview_generative_models.GenerativeModel(
        "gemini-1.0-pro", tools=[weather_tool]
    )

    responder = preview_generative_models.AutomaticFunctionCallingResponder(
        max_automatic_function_calls=1,
    )
    chat = model.start_chat(responder=responder)

    response = chat.send_message("What is the weather like in Boston?")

    # The final answer should reflect the canned weather payload ("nice").
    assert response.text
    assert "nice" in response.text
    # Expected history: user message, model function_call, function_response,
    # and the model's final summary — four entries in total.
    assert len(chat.history) == 4
    assert chat.history[-3].parts[0].function_call
    assert chat.history[-3].parts[0].function_call.name == "get_current_weather"
    assert chat.history[-2].parts[0].function_response
    assert chat.history[-2].parts[0].function_response.name == "get_current_weather"
Original file line numberDiff line numberDiff line change
@@ -538,6 +538,45 @@ def test_generate_content_grounding_vertex_ai_search_retriever(self):
538538
)
539539
assert response.text
540540

541+
@mock.patch.object(
    target=prediction_service.PredictionServiceClient,
    attribute="generate_content",
    new=mock_generate_content,
)
def test_chat_automatic_function_calling(self):
    """Tests automatic function calling in chat, including the call limit."""
    generative_models = preview_generative_models
    get_current_weather_func = generative_models.FunctionDeclaration.from_func(
        get_current_weather
    )
    weather_tool = generative_models.Tool(
        function_declarations=[get_current_weather_func],
    )

    model = generative_models.GenerativeModel(
        "gemini-pro",
        # Specifying the tools once to avoid specifying them in every request
        tools=[weather_tool],
    )
    afc_responder = generative_models.AutomaticFunctionCallingResponder(
        max_automatic_function_calls=5,
    )
    chat = model.start_chat(responder=afc_responder)

    response1 = chat.send_message("What is the weather like in Boston?")
    assert response1.text.startswith("The weather in Boston is")
    assert "nice" in response1.text
    # Expected history: user message, model function_call, function_response,
    # and the model's final summary — four entries in total.
    assert len(chat.history) == 4
    assert chat.history[-3].parts[0].function_call
    assert chat.history[-2].parts[0].function_response

    # Test max_automatic_function_calls:
    # Setting the AFC limit to 0 to test the error handling
    afc_responder._max_automatic_function_calls = 0
    chat2 = model.start_chat(responder=afc_responder)
    with pytest.raises(RuntimeError) as err:
        chat2.send_message("What is the weather like in Boston?")
    assert err.match("Exceeded the maximum")
579+
541580

542581
EXPECTED_SCHEMA_FOR_GET_CURRENT_WEATHER = {
543582
"title": "get_current_weather",
Original file line numberDiff line numberDiff line change
@@ -116,6 +116,52 @@ print(chat.send_message(
116116
))
117117
```
118118

119+
120+
#### Automatic function calling
121+
122+
```
123+
from vertexai.preview.generative_models import GenerativeModel, Tool, FunctionDeclaration, AutomaticFunctionCallingResponder
124+
125+
# First, create functions that the model can use to answer your questions.
126+
def get_current_weather(location: str, unit: str = "centigrade"):
127+
"""Gets weather in the specified location.
128+
129+
Args:
130+
location: The location for which to get the weather.
131+
unit: Optional. Temperature unit. Can be Centigrade or Fahrenheit. Defaults to Centigrade.
132+
"""
133+
return dict(
134+
location=location,
135+
unit=unit,
136+
weather="Super nice, but maybe a bit hot.",
137+
)
138+
139+
# Infer function schema
140+
get_current_weather_func = FunctionDeclaration.from_func(get_current_weather)
141+
# Tool is a collection of related functions
142+
weather_tool = Tool(
143+
function_declarations=[get_current_weather_func],
144+
)
145+
146+
# Use tools in chat:
147+
model = GenerativeModel(
148+
"gemini-pro",
149+
# You can specify tools when creating a model to avoid having to send them with every request.
150+
tools=[weather_tool],
151+
)
152+
153+
# Activate automatic function calling:
154+
afc_responder = AutomaticFunctionCallingResponder(
155+
# Optional:
156+
max_automatic_function_calls=5,
157+
)
158+
chat = model.start_chat(responder=afc_responder)
159+
# Send a message to the model. The model will respond with a function call.
160+
# The SDK will automatically call the requested function and respond to the model.
161+
# The model will use the function call response to answer the original question.
162+
print(chat.send_message("What is the weather like in Boston?"))
163+
```
164+
119165
## Documentation
120166

121167
You can find complete documentation for the Vertex AI SDKs and the Gemini model in the Google Cloud [documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/overview)

0 commit comments

Comments
 (0)