@@ -765,7 +765,6 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
         num_tokens = 0
         for tool in tools:
             num_tokens += len(encoding.encode('type'))
-            num_tokens += len(encoding.encode(tool.get("type")))
             num_tokens += len(encoding.encode('function'))
 
             # calculate num tokens for function object
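The removed line called a dict-style `.get()` on each tool, while the test hunk below passes `PromptMessageTool` objects rather than plain dicts, so only the literal key strings are encoded now. Below is a minimal, runnable sketch of that counting pattern, assuming tiktoken's `cl100k_base` encoding and a simplified `StubTool` stand-in; the loop body past this hunk is an assumption for illustration, not the upstream implementation.

# Minimal sketch of the key-by-key token counting, not the upstream code.
# `StubTool` stands in for PromptMessageTool; the lines after the comment
# are an assumed continuation of the loop, shown only for orientation.
from dataclasses import dataclass, field

import tiktoken


@dataclass
class StubTool:
    name: str
    description: str
    parameters: dict = field(default_factory=dict)


def num_tokens_for_tools(encoding: tiktoken.Encoding, tools: list[StubTool]) -> int:
    num_tokens = 0
    for tool in tools:
        # Encode only the literal JSON key names; tool.get("type") is gone
        # because StubTool (like a model object) has no dict-style .get().
        num_tokens += len(encoding.encode('type'))
        num_tokens += len(encoding.encode('function'))

        # calculate num tokens for function object (assumed continuation)
        num_tokens += len(encoding.encode('name'))
        num_tokens += len(encoding.encode(tool.name))
        num_tokens += len(encoding.encode('description'))
        num_tokens += len(encoding.encode(tool.description))
    return num_tokens


if __name__ == '__main__':
    enc = tiktoken.get_encoding('cl100k_base')
    tool = StubTool('get_weather', 'Determine weather in my location')
    print(num_tokens_for_tools(enc, [tool]))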
@@ -327,10 +327,35 @@ def test_get_num_tokens():
             UserPromptMessage(
                 content='Hello World!'
             )
+        ],
+        tools=[
+            PromptMessageTool(
+                name='get_weather',
+                description='Determine weather in my location',
+                parameters={
+                    "type": "object",
+                    "properties": {
+                        "location": {
+                            "type": "string",
+                            "description": "The city and state e.g. San Francisco, CA"
+                        },
+                        "unit": {
+                            "type": "string",
+                            "enum": [
+                                "c",
+                                "f"
+                            ]
+                        }
+                    },
+                    "required": [
+                        "location"
+                    ]
+                }
+            )
         ]
     )
 
-    assert num_tokens == 21
+    assert num_tokens == 72
@pytest.mark.parametrize('setup_openai_mock', [['chat', 'remote']], indirect=True)
def test_fine_tuned_models(setup_openai_mock):
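For orientation, the tool defined in the new test corresponds to an OpenAI-style function definition roughly like the dict below; the exact payload built by the model runtime is an assumption here, but it shows which keys ('type', 'function', 'name', 'description', 'parameters') the counting code walks over.

# Assumed OpenAI-style "tools" payload that the PromptMessageTool in the
# test maps to; shown for orientation only, not taken from the diff.
import json

weather_tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Determine weather in my location",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city and state e.g. San Francisco, CA",
                },
                "unit": {"type": "string", "enum": ["c", "f"]},
            },
            "required": ["location"],
        },
    },
}

print(json.dumps(weather_tool, indent=2))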