@@ -11,9 +11,17 @@ import io.cequence.openaiscala.domain.responsesapi.tools.Tool
 * @param include
 * Specify additional output data to include in the model response. Currently supported
 * values are:
+ * - web_search_call.action.sources: Include the sources of the web search tool call.
+ * - code_interpreter_call.outputs: Include the outputs of Python code execution in code
+ * interpreter tool call items.
+ * - computer_call_output.output.image_url: Include image urls from the computer call output.
 * - file_search_call.results: Include the search results of the file search tool call.
 * - message.input_image.image_url: Include image urls from the input message.
- * - computer_call_output.output.image_url: Include image urls from the computer call output.
+ * - message.output_text.logprobs: Include logprobs with assistant messages.
+ * - reasoning.encrypted_content: Include an encrypted version of reasoning tokens in
+ * reasoning item outputs. This enables reasoning items to be used in multi-turn
+ * conversations when using the Responses API statelessly (like when the store parameter is
+ * set to false, or when an organization is enrolled in the zero data retention program).
 * @param instructions
 * Inserts a system (or developer) message as the first item in the model's context.
 * @param maxOutputTokens
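A minimal usage sketch (not part of the diff; the model name is illustrative) of requesting the newly documented include values via the case class defined below:

// Hypothetical usage; all other fields keep their defaults.
val withIncludes = CreateModelResponseSettings(
  model = "gpt-4o", // illustrative model name
  include = Seq(
    "web_search_call.action.sources",
    "code_interpreter_call.outputs",
    "message.output_text.logprobs",
    "reasoning.encrypted_content"
  )
)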
@@ -52,6 +60,36 @@ import io.cequence.openaiscala.domain.responsesapi.tools.Tool
 * @param user
 * A unique identifier representing your end-user, which can help OpenAI to monitor and
 * detect abuse. Learn more.
+ * @param prompt
+ * Reference to a prompt template and its variables.
+ * @param promptCacheKey
+ * Used by OpenAI to cache responses for similar requests to optimize your cache hit rates.
+ * Replaces the user field.
+ * @param background
+ * Whether to run the model response in the background. Optional, defaults to false.
+ * @param maxToolCalls
+ * The maximum number of total calls to built-in tools that can be processed in a response.
+ * This maximum number applies across all built-in tool calls, not per individual tool. Any
+ * further attempts by the model to call a tool will be ignored. Optional.
+ * @param safetyIdentifier
+ * A stable identifier used to help detect users of your application that may be violating
+ * OpenAI's usage policies. The identifier should be a string that uniquely identifies each
+ * user. We recommend hashing their username or email address to avoid sending us any
+ * identifying information. Optional.
+ * @param serviceTier
+ * Specifies the processing type used for serving the request.
+ * - If set to 'auto', then the request will be processed with the service tier configured in
+ * the Project settings. Unless otherwise configured, the Project will use 'default'.
+ * - If set to 'default', then the request will be processed with the standard pricing and
+ * performance for the selected model.
+ * - If set to 'flex' or 'priority', then the request will be processed with the
+ * corresponding service tier.
+ * - When not set, the default behavior is 'auto'.
+ * @param streamOptions
+ * Options for streaming responses.
+ * @param topLogprobs
+ * An integer between 0 and 20 specifying the number of most likely tokens to return at each
+ * token position, each with an associated log probability. Optional.
 */
final case class CreateModelResponseSettings(
  model: String,
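A minimal sketch exercising the new optional fields (hypothetical values; the SHA-256 helper is illustrative and only reflects the hashing recommendation in the safetyIdentifier scaladoc above):

import java.security.MessageDigest

// Illustrative helper: hash a user handle so no identifying
// information is sent as the safetyIdentifier.
def sha256Hex(s: String): String =
  MessageDigest
    .getInstance("SHA-256")
    .digest(s.getBytes("UTF-8"))
    .map("%02x".format(_))
    .mkString

val settings = CreateModelResponseSettings(
  model = "gpt-4o",           // illustrative model name
  serviceTier = Some("flex"), // 'auto' | 'default' | 'flex' | 'priority'
  background = Some(true),    // run the response in the background
  maxToolCalls = Some(5),     // cap across all built-in tool calls
  topLogprobs = Some(3),      // between 0 and 20
  safetyIdentifier = Some(sha256Hex("user@example.com"))
)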
@@ -70,5 +108,112 @@ final case class CreateModelResponseSettings(
  tools: Seq[Tool] = Nil,
  topP: Option[Double] = None,
  truncation: Option[TruncationStrategy] = None,
-  user: Option[String] = None
+  user: Option[String] = None,
+  prompt: Option[Prompt] = None,
+  promptCacheKey: Option[String] = None,
+  background: Option[Boolean] = None,
+  maxToolCalls: Option[Int] = None,
+  safetyIdentifier: Option[String] = None,
+  serviceTier: Option[String] = None,
+  streamOptions: Option[StreamOptions] = None,
+  topLogprobs: Option[Int] = None
+)
+
+object CreateModelResponseSettings {
+
+  def toAuxPart1(x: CreateModelResponseSettings) =
+    CreateModelResponseSettingsAuxPart1(
+      model = x.model,
+      include = x.include,
+      instructions = x.instructions,
+      maxOutputTokens = x.maxOutputTokens,
+      metadata = x.metadata,
+      parallelToolCalls = x.parallelToolCalls,
+      previousResponseId = x.previousResponseId,
+      reasoning = x.reasoning,
+      store = x.store,
+      stream = x.stream,
+      temperature = x.temperature,
+      text = x.text
+    )
+
+  def toAuxPart2(x: CreateModelResponseSettings) =
+    CreateModelResponseSettingsAuxPart2(
+      toolChoice = x.toolChoice,
+      tools = x.tools,
+      topP = x.topP,
+      truncation = x.truncation,
+      user = x.user,
+      prompt = x.prompt,
+      promptCacheKey = x.promptCacheKey,
+      background = x.background,
+      maxToolCalls = x.maxToolCalls,
+      safetyIdentifier = x.safetyIdentifier,
+      serviceTier = x.serviceTier,
+      streamOptions = x.streamOptions,
+      topLogprobs = x.topLogprobs
+    )
+
+  private def fromParts(
+    part1: CreateModelResponseSettingsAuxPart1,
+    part2: CreateModelResponseSettingsAuxPart2
+  ) =
+    CreateModelResponseSettings(
+      model = part1.model,
+      include = part1.include,
+      instructions = part1.instructions,
+      maxOutputTokens = part1.maxOutputTokens,
+      metadata = part1.metadata,
+      parallelToolCalls = part1.parallelToolCalls,
+      previousResponseId = part1.previousResponseId,
+      reasoning = part1.reasoning,
+      store = part1.store,
+      stream = part1.stream,
+      temperature = part1.temperature,
+      text = part1.text,
+      toolChoice = part2.toolChoice,
+      tools = part2.tools,
+      topP = part2.topP,
+      truncation = part2.truncation,
+      user = part2.user,
+      prompt = part2.prompt,
+      promptCacheKey = part2.promptCacheKey,
+      background = part2.background,
+      maxToolCalls = part2.maxToolCalls,
+      safetyIdentifier = part2.safetyIdentifier,
+      serviceTier = part2.serviceTier,
+      streamOptions = part2.streamOptions,
+      topLogprobs = part2.topLogprobs
+    )
+}
+
+final case class CreateModelResponseSettingsAuxPart1(
+  model: String,
+  include: Seq[String],
+  instructions: Option[String],
+  maxOutputTokens: Option[Int],
+  metadata: Option[Map[String, String]],
+  parallelToolCalls: Option[Boolean],
+  previousResponseId: Option[String],
+  reasoning: Option[ReasoningConfig],
+  store: Option[Boolean],
+  stream: Option[Boolean],
+  temperature: Option[Double],
+  text: Option[TextResponseConfig]
+)
+
+final case class CreateModelResponseSettingsAuxPart2(
+  toolChoice: Option[ToolChoice],
+  tools: Seq[Tool],
+  topP: Option[Double],
+  truncation: Option[TruncationStrategy],
+  user: Option[String],
+  prompt: Option[Prompt],
+  promptCacheKey: Option[String],
+  background: Option[Boolean],
+  maxToolCalls: Option[Int],
+  safetyIdentifier: Option[String],
+  serviceTier: Option[String],
+  streamOptions: Option[StreamOptions],
+  topLogprobs: Option[Int]
)
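Worth noting for reviewers: the companion splits the 25 fields of CreateModelResponseSettings across two auxiliary case classes, presumably to stay under the 22-parameter limit of Scala 2's functional combinators (e.g. macro-derived JSON formats), with fromParts reassembling the original. A round-trip sketch (hypothetical test code, not part of the diff; fromParts itself is private, so only the public split is exercised):

val s = CreateModelResponseSettings(
  model = "gpt-4o", // illustrative model name
  serviceTier = Some("priority"),
  topLogprobs = Some(3)
)

// Each field of the original lands in exactly one aux part.
val p1 = CreateModelResponseSettings.toAuxPart1(s)
val p2 = CreateModelResponseSettings.toAuxPart2(s)

assert(p1.model == s.model)
assert(p2.serviceTier == s.serviceTier)
assert(p2.topLogprobs == s.topLogprobs)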