From 664f39e607143fced855fa00f3464f9dfbea3769 Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Wed, 24 Jul 2024 07:33:27 -0700
Subject: [PATCH 1/7] Create README.md

---
 .../\360\237\232\205LiteLLM Proxy/README.md" | 159 ++++++++++++++++++
 1 file changed, 159 insertions(+)
 create mode 100644 "third_party/\360\237\232\205LiteLLM Proxy/README.md"

diff --git "a/third_party/\360\237\232\205LiteLLM Proxy/README.md" "b/third_party/\360\237\232\205LiteLLM Proxy/README.md"
new file mode 100644
index 00000000..3e6bc44d
--- /dev/null
+++ "b/third_party/\360\237\232\205LiteLLM Proxy/README.md"
@@ -0,0 +1,159 @@
+# Use LiteLLM Proxy to Call Mistral AI API
+
+Use [LiteLLM Proxy](https://docs.litellm.ai/docs/simple_proxy) for:
+- Calling 100+ LLMs (Mistral AI, OpenAI, Azure, Vertex AI, Bedrock, etc.) in the OpenAI ChatCompletions & Completions format
+- Tracking usage and setting budgets with Virtual Keys
+
+Works with the [Mistral AI API](https://docs.litellm.ai/docs/providers/mistral) + [Codestral API](https://docs.litellm.ai/docs/providers/codestral) + [Bedrock](https://docs.litellm.ai/docs/providers/bedrock)
+
+## Sample Usage
+
+### Step 1. Create a Config for the LiteLLM Proxy
+
+LiteLLM requires a config that defines all your models; we'll call this file `litellm_config.yaml`.
+
+[Detailed docs on how to set up the LiteLLM config](https://docs.litellm.ai/docs/proxy/configs)
+
+```yaml
+model_list:
+  - model_name: mistral-small-latest ### MODEL Alias ###
+    litellm_params: # all params accepted by litellm.completion() - https://docs.litellm.ai/docs/completion/input
+      model: mistral/mistral-small-latest ### MODEL NAME sent to `litellm.completion()` ###
+      api_key: "os.environ/MISTRAL_API_KEY" # does os.getenv("MISTRAL_API_KEY")
+  - model_name: mistral-nemo
+    litellm_params:
+      model: mistral/open-mistral-nemo
+      api_key: "os.environ/MISTRAL_API_KEY"
+
+```
+
+### Step 2. Start the LiteLLM proxy
+
+```shell
+docker run \
+    -v $(pwd)/litellm_config.yaml:/app/config.yaml \
+    -e MISTRAL_API_KEY=$MISTRAL_API_KEY \
+    -p 4000:4000 \
+    ghcr.io/berriai/litellm:main-latest \
+    --config /app/config.yaml --detailed_debug
+```
+
+### Step 3. Test it!
+
+[Use with Langchain, LlamaIndex, Instructor, etc.](https://docs.litellm.ai/docs/proxy/user_keys)
+
+```python
+import openai
+client = openai.OpenAI(
+    api_key="anything",
+    base_url="http://0.0.0.0:4000"
+)
+
+response = client.chat.completions.create(
+    model="mistral-small-latest",
+    messages = [
+        {
+            "role": "user",
+            "content": "this is a test request, write a short poem"
+        }
+    ]
+)
+
+print(response)
+```
+
+## Tool Calling
+
+```python
+from openai import OpenAI
+client = OpenAI(api_key="anything", base_url="http://0.0.0.0:4000") # set base_url to litellm proxy endpoint
+
+tools = [
+    {
+        "type": "function",
+        "function": {
+            "name": "get_current_weather",
+            "description": "Get the current weather in a given location",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "location": {
+                        "type": "string",
+                        "description": "The city and state, e.g. San Francisco, CA",
+                    },
+                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+                },
+                "required": ["location"],
+            },
+        }
+    }
+]
+messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]
+completion = client.chat.completions.create(
+    model="mistral-small-latest",
+    messages=messages,
+    tools=tools,
+    tool_choice="auto"
+)
+
+print(completion)
+
+```
+
+## Vision Example
+
+```python
+
+from openai import OpenAI
+
+client = OpenAI(api_key="anything", base_url="http://0.0.0.0:4000") # set base_url to litellm proxy endpoint
+
+response = client.chat.completions.create(
+    model="mistral-small-latest",
+    messages=[
+        {
+            "role": "user",
+            "content": [
+                {"type": "text", "text": "What's in this image?"},
+                {
+                    "type": "image_url",
+                    "image_url": {
+                        "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
+                    }
+                }
+            ],
+        }
+    ],
+    max_tokens=300,
+)
+
+print(response.choices[0])
+```
+
+## Supported Mistral AI API Models
+
+**ALL MODELS SUPPORTED**.
+
+Just add `mistral/` to the beginning of the model name.
+
+Example models:
+| Model Name | Usage |
+|----------------|--------------------------------------------------------------|
+| Mistral Small | `completion(model="mistral/mistral-small-latest", messages)` |
+| Mistral Medium | `completion(model="mistral/mistral-medium-latest", messages)`|
+| Mistral Large | `completion(model="mistral/mistral-large-latest", messages)` |
+| Mistral 7B | `completion(model="mistral/open-mistral-7b", messages)` |
+| Mixtral 8x7B | `completion(model="mistral/open-mixtral-8x7b", messages)` |
+| Mixtral 8x22B | `completion(model="mistral/open-mixtral-8x22b", messages)` |
+| Codestral | `completion(model="mistral/codestral-latest", messages)` |
+| Mistral NeMo | `completion(model="mistral/open-mistral-nemo", messages)` |
+| Mistral NeMo 2407 | `completion(model="mistral/open-mistral-nemo-2407", messages)` |
+| Codestral Mamba | `completion(model="mistral/open-codestral-mamba", messages)` |
+| Codestral Mamba (latest) | `completion(model="mistral/codestral-mamba-latest", messages)` |
+
+
+## Supported Bedrock Mistral AI Models
+| Model Name | Usage | Required Environment Variables |
+|----------------|--------------------------------------------------------------|--------------------------------|
+| Mistral 7B Instruct | `completion(model='bedrock/mistral.mistral-7b-instruct-v0:2', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` |
+| Mixtral 8x7B Instruct | `completion(model='bedrock/mistral.mixtral-8x7b-instruct-v0:1', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` |

San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } +] +messages = [{"role": "user", "content": "What's the weather like in Boston today?"}] +completion = client.chat.completions.create( + model="mistral-small-latest", + messages=messages, + tools=tools, + tool_choice="auto" +) + +print(completion) + +``` + +## Vision Example + +```python + +from openai import OpenAI + +client = OpenAI(api_key="anything", base_url="http://0.0.0.0:4000") # set base_url to litellm proxy endpoint + +response = client.chat.completions.create( + model="mistral-small-latest", + messages=[ + { + "role": "user", + "content": [ + {"type": "text", "text": "What's in this image?"}, + { + "type": "image_url", + "image_url": { + "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + } + } + ], + } + ], + max_tokens=300, +) + +print(response.choices[0]) +``` + +## Supported Mistral AI API Models + +**ALL MODELS SUPPORTED**. + +Just add `mistral/` to the beginning of the model name. + +Example models: +| Model Name | Usage | +|----------------|--------------------------------------------------------------| +| Mistral Small | `completion(model="mistral/mistral-small-latest", messages)` | +| Mistral Medium | `completion(model="mistral/mistral-medium-latest", messages)`| +| Mistral Large | `completion(model="mistral/mistral-large-latest", messages)` | +| Mistral 7B | `completion(model="mistral/open-mistral-7b", messages)` | +| Mixtral 8x7B | `completion(model="mistral/open-mixtral-8x7b", messages)` | +| Mixtral 8x22B | `completion(model="mistral/open-mixtral-8x22b", messages)` | +| Codestral | `completion(model="mistral/codestral-latest", messages)` | +| Mistral NeMo | `completion(model="mistral/open-mistral-nemo", messages)` | +| Mistral NeMo 2407 | `completion(model="mistral/open-mistral-nemo-2407", messages)` | +| Codestral Mamba | `completion(model="mistral/open-codestral-mamba", messages)` | +| Codestral Mamba | `completion(model="mistral/codestral-mamba-latest"", messages)` | + + +## Supported Bedrock Mistral AI Models +| Model Name | Usage | +|----------------|--------------------------------------------------------------| +| Mistral 7B Instruct | `completion(model='bedrock/mistral.mistral-7b-instruct-v0:2', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | +| Mixtral 8x7B Instruct | `completion(model='bedrock/mistral.mixtral-8x7b-instruct-v0:1', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | From bf5f32fabc894b2a0702539969e8ba9850a9a31c Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Wed, 24 Jul 2024 07:39:15 -0700 Subject: [PATCH 2/7] =?UTF-8?q?Delete=20third=5Fparty/=F0=9F=9A=85LiteLLM?= =?UTF-8?q?=20Proxy=20directory?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../\360\237\232\205LiteLLM Proxy/README.md" | 159 ------------------ 1 file changed, 159 deletions(-) delete mode 100644 "third_party/\360\237\232\205LiteLLM Proxy/README.md" diff --git "a/third_party/\360\237\232\205LiteLLM Proxy/README.md" "b/third_party/\360\237\232\205LiteLLM Proxy/README.md" deleted file mode 100644 index 3e6bc44d..00000000 --- "a/third_party/\360\237\232\205LiteLLM Proxy/README.md" +++ /dev/null @@ -1,159 +0,0 @@ -# Use 
From b4a9e60b06b23d4e3c8799f527e0053d7fc649b5 Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Wed, 24 Jul 2024 07:39:40 -0700
Subject: [PATCH 3/7] Create README.md

---
 third_party/LiteLLM/README.md | 188 ++++++++++++++++++++++++++++++++++
 1 file changed, 188 insertions(+)
 create mode 100644 third_party/LiteLLM/README.md

diff --git a/third_party/LiteLLM/README.md b/third_party/LiteLLM/README.md
new file mode 100644
index 00000000..3e6bc44d
--- /dev/null
+++ b/third_party/LiteLLM/README.md
@@ -0,0 +1,188 @@
+# Use LiteLLM Proxy to Call Mistral AI API
+
+Use [LiteLLM Proxy](https://docs.litellm.ai/docs/simple_proxy) for:
+- Calling 100+ LLMs (Mistral AI, OpenAI, Azure, Vertex AI, Bedrock, etc.) in the OpenAI ChatCompletions & Completions format
+- Tracking usage and setting budgets with Virtual Keys
+
+Works with the [Mistral AI API](https://docs.litellm.ai/docs/providers/mistral) + [Codestral API](https://docs.litellm.ai/docs/providers/codestral) + [Bedrock](https://docs.litellm.ai/docs/providers/bedrock)
+
+## Sample Usage
+
+### Step 1. Create a Config for the LiteLLM Proxy
+
+LiteLLM requires a config that defines all your models; we'll call this file `litellm_config.yaml`.
+
+[Detailed docs on how to set up the LiteLLM config](https://docs.litellm.ai/docs/proxy/configs)
+
+```yaml
+model_list:
+  - model_name: mistral-small-latest ### MODEL Alias ###
+    litellm_params: # all params accepted by litellm.completion() - https://docs.litellm.ai/docs/completion/input
+      model: mistral/mistral-small-latest ### MODEL NAME sent to `litellm.completion()` ###
+      api_key: "os.environ/MISTRAL_API_KEY" # does os.getenv("MISTRAL_API_KEY")
+  - model_name: mistral-nemo
+    litellm_params:
+      model: mistral/open-mistral-nemo
+      api_key: "os.environ/MISTRAL_API_KEY"
+
+```
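+
+Optional: to track usage and set budgets with Virtual Keys (mentioned above), add a `general_settings` block to the same file. This is only a sketch; it assumes you pick your own `master_key` and have a Postgres `DATABASE_URL` available for storing keys and spend, per the LiteLLM virtual-keys docs:
+
+```yaml
+general_settings:
+  master_key: sk-1234 # admin key used to issue virtual keys
+  database_url: "os.environ/DATABASE_URL" # where keys + spend logs are stored
+```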
+
+### Step 2. Start the LiteLLM proxy
+
+```shell
+docker run \
+    -v $(pwd)/litellm_config.yaml:/app/config.yaml \
+    -e MISTRAL_API_KEY=$MISTRAL_API_KEY \
+    -p 4000:4000 \
+    ghcr.io/berriai/litellm:main-latest \
+    --config /app/config.yaml --detailed_debug
+```
+
+### Step 3. Test it!
+
+[Use with Langchain, LlamaIndex, Instructor, etc.](https://docs.litellm.ai/docs/proxy/user_keys)
+
+```python
+import openai
+client = openai.OpenAI(
+    api_key="anything",
+    base_url="http://0.0.0.0:4000"
+)
+
+response = client.chat.completions.create(
+    model="mistral-small-latest",
+    messages = [
+        {
+            "role": "user",
+            "content": "this is a test request, write a short poem"
+        }
+    ]
+)
+
+print(response)
+```
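+
+You can also test the proxy over plain HTTP, since it exposes OpenAI-compatible routes. A minimal `curl` sketch (assumes the proxy from Step 2 is running locally and no master key is configured):
+
+```shell
+curl http://0.0.0.0:4000/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -d '{
+    "model": "mistral-small-latest",
+    "messages": [{"role": "user", "content": "this is a test request, write a short poem"}]
+  }'
+```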
+
+## Tool Calling
+
+```python
+from openai import OpenAI
+client = OpenAI(api_key="anything", base_url="http://0.0.0.0:4000") # set base_url to litellm proxy endpoint
+
+tools = [
+    {
+        "type": "function",
+        "function": {
+            "name": "get_current_weather",
+            "description": "Get the current weather in a given location",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "location": {
+                        "type": "string",
+                        "description": "The city and state, e.g. San Francisco, CA",
+                    },
+                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+                },
+                "required": ["location"],
+            },
+        }
+    }
+]
+messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]
+completion = client.chat.completions.create(
+    model="mistral-small-latest",
+    messages=messages,
+    tools=tools,
+    tool_choice="auto"
+)
+
+print(completion)
+
+```
+
+## Vision Example
+
+```python
+
+from openai import OpenAI
+
+client = OpenAI(api_key="anything", base_url="http://0.0.0.0:4000") # set base_url to litellm proxy endpoint
+
+response = client.chat.completions.create(
+    model="mistral-small-latest",
+    messages=[
+        {
+            "role": "user",
+            "content": [
+                {"type": "text", "text": "What's in this image?"},
+                {
+                    "type": "image_url",
+                    "image_url": {
+                        "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
+                    }
+                }
+            ],
+        }
+    ],
+    max_tokens=300,
+)
+
+print(response.choices[0])
+```
+
+## Supported Mistral AI API Models
+
+**ALL MODELS SUPPORTED**.
+
+Just add `mistral/` to the beginning of the model name.
+
+Example models:
+| Model Name | Usage |
+|----------------|--------------------------------------------------------------|
+| Mistral Small | `completion(model="mistral/mistral-small-latest", messages)` |
+| Mistral Medium | `completion(model="mistral/mistral-medium-latest", messages)`|
+| Mistral Large | `completion(model="mistral/mistral-large-latest", messages)` |
+| Mistral 7B | `completion(model="mistral/open-mistral-7b", messages)` |
+| Mixtral 8x7B | `completion(model="mistral/open-mixtral-8x7b", messages)` |
+| Mixtral 8x22B | `completion(model="mistral/open-mixtral-8x22b", messages)` |
+| Codestral | `completion(model="mistral/codestral-latest", messages)` |
+| Mistral NeMo | `completion(model="mistral/open-mistral-nemo", messages)` |
+| Mistral NeMo 2407 | `completion(model="mistral/open-mistral-nemo-2407", messages)` |
+| Codestral Mamba | `completion(model="mistral/open-codestral-mamba", messages)` |
+| Codestral Mamba (latest) | `completion(model="mistral/codestral-mamba-latest", messages)` |
+
+
+## Supported Bedrock Mistral AI Models
+| Model Name | Usage | Required Environment Variables |
+|----------------|--------------------------------------------------------------|--------------------------------|
+| Mistral 7B Instruct | `completion(model='bedrock/mistral.mistral-7b-instruct-v0:2', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` |
+| Mixtral 8x7B Instruct | `completion(model='bedrock/mistral.mixtral-8x7b-instruct-v0:1', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` |
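+
+To route to these Bedrock models through the proxy, add them to `litellm_config.yaml` like any other provider. A sketch under assumptions (the alias and region are placeholders; AWS credentials come from the environment variables above):
+
+```yaml
+model_list:
+  - model_name: bedrock-mistral-7b # alias clients will send
+    litellm_params:
+      model: bedrock/mistral.mistral-7b-instruct-v0:2
+      aws_region_name: us-west-2 # assumption: set to your region
+```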
From 97d98d441b93039d572536d0150fa3902acbf7c3 Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Wed, 24 Jul 2024 07:41:37 -0700
Subject: [PATCH 4/7] Update README.md

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index d4200312..7bf954df 100644
--- a/README.md
+++ b/README.md
@@ -57,6 +57,7 @@ Disclaimer: Examples contributed by the community and partners do not represent
 | [Indexify Integration - PDF Summarization](third_party/Indexify/pdf-summarization) | summarization, PDF | Indexify |
 | [langgraph_code_assistant_mistral.ipynb](third_party/langchain/langgraph_code_assistant_mistral.ipynb) | code | Langchain |
 | [langgraph_crag_mistral.ipynb](third_party/langchain/langgraph_crag_mistral.ipynb) | RAG | Langchain |
+| [Use 🚅LiteLLM Proxy to Call Mistral AI API](third_party/LiteLLM/README.md) | LLM Gateway/Proxy | LiteLLM |
 | [llamaindex_agentic_rag.ipynb](third_party/LlamaIndex/llamaindex_agentic_rag.ipynb) | RAG, agent | LLamaIndex |
 | [llamaindex_mistralai_finetuning.ipynb](third_party/LlamaIndex/llamaindex_mistralai_finetuning.ipynb) | fine-tuning | LLamaIndex |
 | [Mesop Integration - Chat with PDF](third_party/mesop/README.md) | UI chat, demo, RAG | Mesop |

From d0933641a2ebad1ecaefe00d3ae1c3ca7e7fe5c0 Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Wed, 24 Jul 2024 07:41:58 -0700
Subject: [PATCH 5/7] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 7bf954df..22108003 100644
--- a/README.md
+++ b/README.md
@@ -57,7 +57,7 @@ Disclaimer: Examples contributed by the community and partners do not represent
 | [Indexify Integration - PDF Summarization](third_party/Indexify/pdf-summarization) | summarization, PDF | Indexify |
 | [langgraph_code_assistant_mistral.ipynb](third_party/langchain/langgraph_code_assistant_mistral.ipynb) | code | Langchain |
 | [langgraph_crag_mistral.ipynb](third_party/langchain/langgraph_crag_mistral.ipynb) | RAG | Langchain |
-| [Use 🚅LiteLLM Proxy to Call Mistral AI API](third_party/LiteLLM/README.md) | LLM Gateway/Proxy | LiteLLM |
+| [Use 🚅 LiteLLM Proxy to Call Mistral AI API](third_party/LiteLLM/README.md) | LLM Gateway/Proxy | LiteLLM |
 | [llamaindex_agentic_rag.ipynb](third_party/LlamaIndex/llamaindex_agentic_rag.ipynb) | RAG, agent | LLamaIndex |
 | [llamaindex_mistralai_finetuning.ipynb](third_party/LlamaIndex/llamaindex_mistralai_finetuning.ipynb) | fine-tuning | LLamaIndex |
 | [Mesop Integration - Chat with PDF](third_party/mesop/README.md) | UI chat, demo, RAG | Mesop |

From 16636ed99fb0c2e464caad2e1583155db8cedb60 Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Thu, 25 Jul 2024 19:55:47 -0700
Subject: [PATCH 6/7] Update README.md - use mistral SDK

---
 third_party/LiteLLM/README.md | 60 ++++++++++++++++++++++++----------
 1 file changed, 38 insertions(+), 22 deletions(-)

diff --git a/third_party/LiteLLM/README.md b/third_party/LiteLLM/README.md
index 3e6bc44d..4a6e1d87 100644
--- a/third_party/LiteLLM/README.md
+++ b/third_party/LiteLLM/README.md
@@ -42,31 +42,44 @@
 
 [Use with Langchain, LlamaIndex, Instructor, etc.](https://docs.litellm.ai/docs/proxy/user_keys)
 
-```python
-import openai
-client = openai.OpenAI(
-    api_key="anything",
-    base_url="http://0.0.0.0:4000"
-)
 
+## Basic Chat Completion
+
+```python
+from mistralai.client import MistralClient
+
+client = MistralClient(
+    api_key="sk-1234", # set api_key to a litellm proxy virtual key
+    endpoint="http://0.0.0.0:4000" # set endpoint to the litellm proxy endpoint
+)
-response = client.chat.completions.create(
+chat_response = client.chat(
     model="mistral-small-latest",
-    messages = [
-        {
-            "role": "user",
-            "content": "this is a test request, write a short poem"
-        }
-    ]
+    messages=[
+        {"role": "user", "content": "this is a test request, write a short poem"}
+    ],
 )
+print(chat_response.choices[0].message.content)
 
-print(response)
 ```
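+
+Streaming works through the proxy as well. A short sketch, assuming the same `client` as above (`MistralClient.chat_stream` yields incremental deltas):
+
+```python
+# print tokens as they arrive instead of waiting for the full response
+for chunk in client.chat_stream(
+    model="mistral-small-latest",
+    messages=[{"role": "user", "content": "this is a test request, write a short poem"}],
+):
+    print(chunk.choices[0].delta.content or "", end="")
+```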
 
-## Tool Calling
-
 ```python
-from openai import OpenAI
-client = OpenAI(api_key="anything", base_url="http://0.0.0.0:4000") # set base_url to litellm proxy endpoint
+from mistralai.client import MistralClient
+
+client = MistralClient(
+    api_key="sk-1234", # set api_key to a litellm proxy virtual key
+    endpoint="http://0.0.0.0:4000" # set endpoint to the litellm proxy endpoint
+)
 
 tools = [
     {
@@ -89,6 +98,6 @@ tools = [
 ]
 messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]
-completion = client.chat.completions.create(
+completion = client.chat(
     model="mistral-small-latest",
     messages=messages,
     tools=tools,
@@ -104,9 +113,12 @@
 
 ```python
 
-from openai import OpenAI
+from mistralai.client import MistralClient
 
-client = OpenAI(api_key="anything", base_url="http://0.0.0.0:4000") # set base_url to litellm proxy endpoint
+client = MistralClient(
+    api_key="sk-1234", # set api_key to a litellm proxy virtual key
+    endpoint="http://0.0.0.0:4000" # set endpoint to the litellm proxy endpoint
+)
 
-response = client.chat.completions.create(
+response = client.chat(
     model="mistral-small-latest",

From 4e88180d9f8faf93bc0d35edbc76d352c2fd7ce5 Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Thu, 25 Jul 2024 19:56:43 -0700
Subject: [PATCH 7/7] Update README.md

---
 third_party/LiteLLM/README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/third_party/LiteLLM/README.md b/third_party/LiteLLM/README.md
index 4a6e1d87..1ad60e62 100644
--- a/third_party/LiteLLM/README.md
+++ b/third_party/LiteLLM/README.md
@@ -66,6 +66,7 @@ print(chat_response.choices[0].message.content)
 
 ```
 
+## Tool Use
 
 ```python
 from mistralai.client import MistralClient