Skip to content

Commit b63fdb8

Browse files
authored
Change 'in-memory' to 'in_memory' in prompt cache enums (#6187)
1 parent 4708bb8 commit b63fdb8

2 files changed

Lines changed: 8 additions & 3 deletions

File tree

.changeset/odd-peaches-chew.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
+---
+"@effect/ai-openai": patch
+---
+
+Change 'in-memory' to 'in_memory' in prompt cache enums

packages/ai/openai/src/Generated.ts

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2466,7 +2466,7 @@ export class ChatCompletionFunctions extends S.Class<ChatCompletionFunctions>("C
   /**
    * The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching, which keeps cached prefixes active for longer, up to a maximum of 24 hours. [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
    */
- export class CreateChatCompletionRequestPromptCacheRetentionEnum extends S.Literal("in-memory", "24h") {}
+ export class CreateChatCompletionRequestPromptCacheRetentionEnum extends S.Literal("in_memory", "24h") {}

  export class CreateChatCompletionRequest extends S.Class<CreateChatCompletionRequest>("CreateChatCompletionRequest")({
   /**
@@ -14969,7 +14969,7 @@ export class CreateResponseTruncationEnum extends S.Literal("auto", "disabled")
   /**
    * The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching, which keeps cached prefixes active for longer, up to a maximum of 24 hours. [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
    */
- export class CreateResponsePromptCacheRetentionEnum extends S.Literal("in-memory", "24h") {}
+ export class CreateResponsePromptCacheRetentionEnum extends S.Literal("in_memory", "24h") {}

  export class CreateResponse extends S.Class<CreateResponse>("CreateResponse")({
   "input": S.optionalWith(InputParam, { nullable: true }),
@@ -15181,7 +15181,7 @@ export class ResponseTruncationEnum extends S.Literal("auto", "disabled") {}
   /**
    * The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching, which keeps cached prefixes active for longer, up to a maximum of 24 hours. [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
    */
- export class ResponsePromptCacheRetentionEnum extends S.Literal("in-memory", "24h") {}
+ export class ResponsePromptCacheRetentionEnum extends S.Literal("in_memory", "24h") {}

  export class Response extends S.Class<Response>("Response")({
   /**

0 commit comments

Comments (0)