
Commit 902d5d3

SDK regeneration (#255)
Co-authored-by: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
1 parent 3d05d2f commit 902d5d3

18 files changed (+238, -307 lines)

package.json

Lines changed: 2 additions & 2 deletions
@@ -22,8 +22,8 @@
     "form-data-encoder": "^4.0.2",
     "@aws-sdk/client-sagemaker": "^3.583.0",
     "@aws-sdk/credential-providers": "^3.583.0",
-    "@aws-sdk/protocol-http": "^3.374.0",
-    "@aws-sdk/signature-v4": "^3.374.0",
+    "@smithy/protocol-http": "^5.1.2",
+    "@smithy/signature-v4": "^5.1.2",
     "convict": "^6.2.4"
   },
   "devDependencies": {

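This swaps the deprecated `@aws-sdk/protocol-http` and `@aws-sdk/signature-v4` packages for their `@smithy/*` successors, which expose the same classes under new import paths. Below is a minimal sketch of signing a Sagemaker request with the new packages; the region, endpoint, and the `@aws-crypto/sha256-js` hash implementation are illustrative assumptions, not part of this commit.

```ts
import { HttpRequest } from "@smithy/protocol-http";
import { SignatureV4 } from "@smithy/signature-v4";
import { fromNodeProviderChain } from "@aws-sdk/credential-providers";
import { Sha256 } from "@aws-crypto/sha256-js"; // assumed SHA-256 implementation

async function signSagemakerRequest() {
    // Same constructor shape as the old @aws-sdk/signature-v4 signer.
    const signer = new SignatureV4({
        service: "sagemaker",
        region: "us-east-1", // illustrative region
        credentials: fromNodeProviderChain(),
        sha256: Sha256,
    });

    // Hypothetical Sagemaker runtime invocation; hostname and path are placeholders.
    const request = new HttpRequest({
        method: "POST",
        protocol: "https:",
        hostname: "runtime.sagemaker.us-east-1.amazonaws.com",
        path: "/endpoints/my-endpoint/invocations",
        headers: {
            host: "runtime.sagemaker.us-east-1.amazonaws.com",
            "content-type": "application/json",
        },
        body: JSON.stringify({ message: "Hello" }),
    });

    return signer.sign(request); // returns the request with SigV4 headers attached
}
```
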
src/api/client/requests/ChatRequest.ts

Lines changed: 7 additions & 0 deletions
@@ -240,6 +240,13 @@ export interface ChatRequest {
      * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
      */
     presencePenalty?: number;
+    /**
+     * When enabled, the user's prompt will be sent to the model without
+     * any pre-processing.
+     *
+     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+     */
+    rawPrompting?: boolean;
     /**
      * A list of available tools (functions) that the model may suggest invoking before producing a text response.
      *
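
For reference, a minimal sketch of how the new flag might be used with the generated v1 client; the model name and `CO_API_KEY` environment variable are placeholders.

```ts
import { CohereClient } from "cohere-ai";

const cohere = new CohereClient({ token: process.env.CO_API_KEY! });

async function rawChat() {
    const response = await cohere.chat({
        model: "command-r", // placeholder model name
        // With rawPrompting enabled, this string is forwarded to the model verbatim.
        message: "Write one sentence about TypeScript.",
        rawPrompting: true,
    });
    console.log(response.text);
}
```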

src/api/client/requests/ChatStreamRequest.ts

Lines changed: 7 additions & 0 deletions
@@ -186,6 +186,13 @@ export interface ChatStreamRequest {
      * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
      */
     presencePenalty?: number;
+    /**
+     * When enabled, the user's prompt will be sent to the model without
+     * any pre-processing.
+     *
+     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+     */
+    rawPrompting?: boolean;
     /**
      * A list of available tools (functions) that the model may suggest invoking before producing a text response.
      *
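
The streaming variant accepts the same flag. A sketch of consuming the v1 stream is below; the `text-generation` event name follows the existing v1 stream types and is assumed unchanged by this commit.

```ts
import { CohereClient } from "cohere-ai";

const cohere = new CohereClient({ token: process.env.CO_API_KEY! });

async function rawChatStream() {
    const stream = await cohere.chatStream({
        model: "command-r", // placeholder model name
        message: "Write one sentence about TypeScript.",
        rawPrompting: true, // prompt is sent without pre-processing
    });

    for await (const event of stream) {
        if (event.eventType === "text-generation") {
            process.stdout.write(event.text);
        }
    }
}
```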

src/api/resources/v2/client/requests/V2ChatRequest.ts

Lines changed: 11 additions & 4 deletions
@@ -127,9 +127,11 @@ export interface V2ChatRequest {
      */
     safetyMode?: Cohere.V2ChatRequestSafetyMode;
     /**
-     * The maximum number of tokens the model will generate as part of the response.
+     * The maximum number of output tokens the model will generate in the response. If not set, `max_tokens` defaults to the model's maximum output token limit. You can find the maximum output token limits for each model in the [model documentation](https://docs.cohere.com/docs/models).
      *
-     * **Note**: Setting a low value may result in incomplete generations.
+     * **Note**: Setting a low value may result in incomplete generations. In such cases, the `finish_reason` field in the response will be set to `"MAX_TOKENS"`.
+     *
+     * **Note**: If `max_tokens` is set higher than the model's maximum output token limit, the generation will be capped at that model-specific maximum limit.
      */
     maxTokens?: number;
     /** A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence. */
@@ -177,9 +179,14 @@ export interface V2ChatRequest {
      * If tool_choice isn't specified, then the model is free to choose whether to use the specified tools or not.
      *
      * **Note**: This parameter is only compatible with models [Command-r7b](https://docs.cohere.com/v2/docs/command-r7b) and newer.
-     *
-     * **Note**: The same functionality can be achieved in `/v1/chat` using the `force_single_step` parameter. If `force_single_step=true`, this is equivalent to specifying `REQUIRED`. While if `force_single_step=true` and `tool_results` are passed, this is equivalent to specifying `NONE`.
      */
     toolChoice?: Cohere.V2ChatRequestToolChoice;
     thinking?: Cohere.Thinking;
+    /**
+     * When enabled, the user's prompt will be sent to the model without
+     * any pre-processing.
+     *
+     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+     */
+    rawPrompting?: boolean;
 }
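
The reworded `maxTokens` note can be checked directly in client code: when the cap truncates the generation, the response reports `finishReason === "MAX_TOKENS"`. A minimal sketch against the v2 client; the model name and API-key variable are placeholders.

```ts
import { CohereClientV2 } from "cohere-ai";

const cohere = new CohereClientV2({ token: process.env.CO_API_KEY! });

async function cappedChat() {
    const response = await cohere.chat({
        model: "command-r-plus", // placeholder model name
        messages: [{ role: "user", content: "Summarize the history of TypeScript." }],
        maxTokens: 20, // deliberately low to force truncation
    });

    if (response.finishReason === "MAX_TOKENS") {
        // Generation stopped at the requested output-token cap.
        console.warn("Response truncated; consider raising maxTokens.");
    }
}
```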

src/api/resources/v2/client/requests/V2ChatStreamRequest.ts

Lines changed: 11 additions & 4 deletions
@@ -114,9 +114,11 @@ export interface V2ChatStreamRequest {
      */
     safetyMode?: Cohere.V2ChatStreamRequestSafetyMode;
     /**
-     * The maximum number of tokens the model will generate as part of the response.
+     * The maximum number of output tokens the model will generate in the response. If not set, `max_tokens` defaults to the model's maximum output token limit. You can find the maximum output token limits for each model in the [model documentation](https://docs.cohere.com/docs/models).
      *
-     * **Note**: Setting a low value may result in incomplete generations.
+     * **Note**: Setting a low value may result in incomplete generations. In such cases, the `finish_reason` field in the response will be set to `"MAX_TOKENS"`.
+     *
+     * **Note**: If `max_tokens` is set higher than the model's maximum output token limit, the generation will be capped at that model-specific maximum limit.
      */
     maxTokens?: number;
     /** A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence. */
@@ -164,9 +166,14 @@ export interface V2ChatStreamRequest {
      * If tool_choice isn't specified, then the model is free to choose whether to use the specified tools or not.
      *
      * **Note**: This parameter is only compatible with models [Command-r7b](https://docs.cohere.com/v2/docs/command-r7b) and newer.
-     *
-     * **Note**: The same functionality can be achieved in `/v1/chat` using the `force_single_step` parameter. If `force_single_step=true`, this is equivalent to specifying `REQUIRED`. While if `force_single_step=true` and `tool_results` are passed, this is equivalent to specifying `NONE`.
      */
     toolChoice?: Cohere.V2ChatStreamRequestToolChoice;
     thinking?: Cohere.Thinking;
+    /**
+     * When enabled, the user's prompt will be sent to the model without
+     * any pre-processing.
+     *
+     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+     */
+    rawPrompting?: boolean;
 }

src/api/resources/v2/types/V2ChatRequestToolChoice.ts

Lines changed: 0 additions & 2 deletions
@@ -8,8 +8,6 @@
  * If tool_choice isn't specified, then the model is free to choose whether to use the specified tools or not.
  *
  * **Note**: This parameter is only compatible with models [Command-r7b](https://docs.cohere.com/v2/docs/command-r7b) and newer.
- *
- * **Note**: The same functionality can be achieved in `/v1/chat` using the `force_single_step` parameter. If `force_single_step=true`, this is equivalent to specifying `REQUIRED`. While if `force_single_step=true` and `tool_results` are passed, this is equivalent to specifying `NONE`.
  */
 export type V2ChatRequestToolChoice = "REQUIRED" | "NONE";
 export const V2ChatRequestToolChoice = {
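
With the `/v1/chat` cross-reference dropped, the two accepted values stand on their own: `"REQUIRED"` forces at least one tool call, `"NONE"` forbids tool calls. A sketch of forcing a tool call on a v2 request; the tool definition and model name are illustrative.

```ts
import { CohereClientV2 } from "cohere-ai";

const cohere = new CohereClientV2({ token: process.env.CO_API_KEY! });

async function forcedToolCall() {
    const response = await cohere.chat({
        model: "command-r7b-12-2024", // per the note above, Command-r7b or newer
        messages: [{ role: "user", content: "What is the weather in Toronto?" }],
        tools: [
            {
                type: "function",
                function: {
                    name: "get_weather", // hypothetical tool
                    description: "Returns the current weather for a city.",
                    parameters: {
                        type: "object",
                        properties: { city: { type: "string" } },
                        required: ["city"],
                    },
                },
            },
        ],
        toolChoice: "REQUIRED", // the model must call a tool before answering
    });

    console.log(response.message.toolCalls);
}
```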

src/api/resources/v2/types/V2ChatStreamRequestToolChoice.ts

Lines changed: 0 additions & 2 deletions
@@ -8,8 +8,6 @@
  * If tool_choice isn't specified, then the model is free to choose whether to use the specified tools or not.
  *
  * **Note**: This parameter is only compatible with models [Command-r7b](https://docs.cohere.com/v2/docs/command-r7b) and newer.
- *
- * **Note**: The same functionality can be achieved in `/v1/chat` using the `force_single_step` parameter. If `force_single_step=true`, this is equivalent to specifying `REQUIRED`. While if `force_single_step=true` and `tool_results` are passed, this is equivalent to specifying `NONE`.
  */
 export type V2ChatStreamRequestToolChoice = "REQUIRED" | "NONE";
 export const V2ChatStreamRequestToolChoice = {

src/api/types/AssistantMessageV2ContentItem.ts

Lines changed: 8 additions & 1 deletion
@@ -4,10 +4,17 @@

 import * as Cohere from "../index";

-export type AssistantMessageV2ContentItem = Cohere.AssistantMessageV2ContentItem.Text;
+export type AssistantMessageV2ContentItem =
+    | Cohere.AssistantMessageV2ContentItem.Text
+    | Cohere.AssistantMessageV2ContentItem.Thinking;

 export namespace AssistantMessageV2ContentItem {
     export interface Text extends Cohere.ChatTextContent {
         type: "text";
     }
+
+    export interface Thinking {
+        type: "thinking";
+        value?: unknown;
+    }
 }
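
Since assistant content items are now a two-member union, callers should narrow on the `type` discriminant before reading fields. A small sketch, assuming the package's generated `Cohere` namespace export; the thinking payload is handled generically because its shape is left loose here.

```ts
import { Cohere } from "cohere-ai";

function describeContent(items: Cohere.AssistantMessageV2ContentItem[]): string[] {
    return items.map((item) => {
        if (item.type === "text") {
            // Text items keep the existing ChatTextContent shape.
            return `text: ${item.text}`;
        }
        // The only other member of the union after this change is "thinking".
        return "thinking item";
    });
}
```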

src/api/types/ChatContentDeltaEventDeltaMessageContent.ts

Lines changed: 1 addition & 0 deletions
@@ -3,5 +3,6 @@
  */

 export interface ChatContentDeltaEventDeltaMessageContent {
+    thinking?: string;
     text?: string;
 }

src/api/types/ChatMessageEndEventDelta.ts

Lines changed: 4 additions & 0 deletions
@@ -2,7 +2,11 @@
  * This file was auto-generated by Fern from our API Definition.
  */

+import * as Cohere from "../index";
+
 export interface ChatMessageEndEventDelta {
     /** An error message if an error occurred during the generation. */
     error?: string;
+    finishReason?: Cohere.ChatFinishReason;
+    usage?: Cohere.Usage;
 }
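
Combined with the `thinking` field added to `ChatContentDeltaEventDeltaMessageContent` above, the richer `message-end` delta lets a streaming consumer print thinking and text tokens as they arrive and then inspect why the generation ended and what it consumed. A sketch against the v2 streaming client; the model name and API-key variable are placeholders.

```ts
import { CohereClientV2 } from "cohere-ai";

const cohere = new CohereClientV2({ token: process.env.CO_API_KEY! });

async function streamWithUsage() {
    const stream = await cohere.chatStream({
        model: "command-r-plus", // placeholder model name
        messages: [{ role: "user", content: "Explain generics in TypeScript." }],
    });

    for await (const event of stream) {
        if (event.type === "content-delta") {
            // Either field may be present on a given delta.
            const content = event.delta?.message?.content;
            if (content?.thinking) process.stdout.write(content.thinking);
            if (content?.text) process.stdout.write(content.text);
        } else if (event.type === "message-end") {
            // finishReason and usage are the fields added to ChatMessageEndEventDelta.
            console.log("finish reason:", event.delta?.finishReason);
            console.log("usage:", event.delta?.usage);
        }
    }
}
```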
