Skip to content

Commit 759ec10

Browse files
authored
fix vercel gateway variants (anomalyco#13541)
Co-authored-by: Benjamin Woodruff <github@benjam.info>
1 parent ef205c3 commit 759ec10

File tree

2 files changed

+380
-10
lines changed

2 files changed

+380
-10
lines changed

packages/opencode/src/provider/transform.ts

Lines changed: 87 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -171,7 +171,7 @@ export namespace ProviderTransform {
171171
return msgs
172172
}
173173

174-
function applyCaching(msgs: ModelMessage[], providerID: string): ModelMessage[] {
174+
function applyCaching(msgs: ModelMessage[], model: Provider.Model): ModelMessage[] {
175175
const system = msgs.filter((msg) => msg.role === "system").slice(0, 2)
176176
const final = msgs.filter((msg) => msg.role !== "system").slice(-2)
177177

@@ -194,7 +194,7 @@ export namespace ProviderTransform {
194194
}
195195

196196
for (const msg of unique([...system, ...final])) {
197-
const useMessageLevelOptions = providerID === "anthropic" || providerID.includes("bedrock")
197+
const useMessageLevelOptions = model.providerID === "anthropic" || model.providerID.includes("bedrock")
198198
const shouldUseContentOptions = !useMessageLevelOptions && Array.isArray(msg.content) && msg.content.length > 0
199199

200200
if (shouldUseContentOptions) {
@@ -253,14 +253,15 @@ export namespace ProviderTransform {
253253
msgs = unsupportedParts(msgs, model)
254254
msgs = normalizeMessages(msgs, model, options)
255255
if (
256-
model.providerID === "anthropic" ||
257-
model.api.id.includes("anthropic") ||
258-
model.api.id.includes("claude") ||
259-
model.id.includes("anthropic") ||
260-
model.id.includes("claude") ||
261-
model.api.npm === "@ai-sdk/anthropic"
256+
(model.providerID === "anthropic" ||
257+
model.api.id.includes("anthropic") ||
258+
model.api.id.includes("claude") ||
259+
model.id.includes("anthropic") ||
260+
model.id.includes("claude") ||
261+
model.api.npm === "@ai-sdk/anthropic") &&
262+
model.api.npm !== "@ai-sdk/gateway"
262263
) {
263-
msgs = applyCaching(msgs, model.providerID)
264+
msgs = applyCaching(msgs, model)
264265
}
265266

266267
// Remap providerOptions keys from stored providerID to expected SDK key
@@ -363,8 +364,50 @@ export namespace ProviderTransform {
363364
if (!model.id.includes("gpt") && !model.id.includes("gemini-3")) return {}
364365
return Object.fromEntries(OPENAI_EFFORTS.map((effort) => [effort, { reasoning: { effort } }]))
365366

366-
// TODO: YOU CANNOT SET max_tokens if this is set!!!
367367
case "@ai-sdk/gateway":
368+
if (model.id.includes("anthropic")) {
369+
return {
370+
high: {
371+
thinking: {
372+
type: "enabled",
373+
budgetTokens: 16000,
374+
},
375+
},
376+
max: {
377+
thinking: {
378+
type: "enabled",
379+
budgetTokens: 31999,
380+
},
381+
},
382+
}
383+
}
384+
if (model.id.includes("google")) {
385+
if (id.includes("2.5")) {
386+
return {
387+
high: {
388+
thinkingConfig: {
389+
includeThoughts: true,
390+
thinkingBudget: 16000,
391+
},
392+
},
393+
max: {
394+
thinkingConfig: {
395+
includeThoughts: true,
396+
thinkingBudget: 24576,
397+
},
398+
},
399+
}
400+
}
401+
return Object.fromEntries(
402+
["low", "high"].map((effort) => [
403+
effort,
404+
{
405+
includeThoughts: true,
406+
thinkingLevel: effort,
407+
},
408+
]),
409+
)
410+
}
368411
return Object.fromEntries(OPENAI_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }]))
369412

370413
case "@ai-sdk/github-copilot":
@@ -720,6 +763,12 @@ export namespace ProviderTransform {
720763
result["promptCacheKey"] = input.sessionID
721764
}
722765

766+
if (input.model.api.npm === "@ai-sdk/gateway") {
767+
result["gateway"] = {
768+
caching: "auto",
769+
}
770+
}
771+
723772
return result
724773
}
725774

@@ -754,6 +803,34 @@ export namespace ProviderTransform {
754803
}
755804

756805
export function providerOptions(model: Provider.Model, options: { [x: string]: any }) {
806+
if (model.api.npm === "@ai-sdk/gateway") {
807+
// Gateway providerOptions are split across two namespaces:
808+
// - `gateway`: gateway-native routing/caching controls
809+
// - `<upstream slug>`: provider-specific model options (anthropic/openai/...)
810+
// We keep `gateway` as-is and route every other top-level option under the
811+
// model-derived upstream slug so variants/options can stay flat internally.
812+
const i = model.api.id.indexOf("/")
813+
const slug = i > 0 ? model.api.id.slice(0, i) : undefined
814+
const gateway = options.gateway
815+
const rest = Object.fromEntries(Object.entries(options).filter(([k]) => k !== "gateway"))
816+
const has = Object.keys(rest).length > 0
817+
818+
const result: Record<string, any> = {}
819+
if (gateway !== undefined) result.gateway = gateway
820+
821+
if (has) {
822+
if (slug) {
823+
result[slug] = rest
824+
} else if (gateway && typeof gateway === "object" && !Array.isArray(gateway)) {
825+
result.gateway = { ...gateway, ...rest }
826+
} else {
827+
result.gateway = rest
828+
}
829+
}
830+
831+
return result
832+
}
833+
757834
const key = sdkKey(model.api.npm) ?? model.providerID
758835
return { [key]: options }
759836
}

0 commit comments

Comments
 (0)