mirror of https://github.com/openai/openai-go.git
synced 2026-03-31 08:37:22 +09:00
1338 lines · 55 KiB · Go
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
|
|
package openai
|
|
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
"encoding/json"
|
|
"io"
|
|
"mime/multipart"
|
|
"net/http"
|
|
"slices"
|
|
|
|
"github.com/openai/openai-go/v3/internal/apiform"
|
|
"github.com/openai/openai-go/v3/internal/apijson"
|
|
"github.com/openai/openai-go/v3/internal/requestconfig"
|
|
"github.com/openai/openai-go/v3/option"
|
|
"github.com/openai/openai-go/v3/packages/param"
|
|
"github.com/openai/openai-go/v3/packages/respjson"
|
|
"github.com/openai/openai-go/v3/packages/ssestream"
|
|
"github.com/openai/openai-go/v3/shared/constant"
|
|
)
|
|
|
|
// Given a prompt and/or an input image, the model will generate a new image.
//
// ImageService contains methods and other services that help with interacting with
// the openai API.
//
// Note, unlike clients, this service does not read variables from the environment
// automatically. You should not instantiate this service directly, and instead use
// the [NewImageService] method instead.
type ImageService struct {
	// Options holds request options applied to every request made through this
	// service, before any per-call options.
	Options []option.RequestOption
}
|
|
|
|
// NewImageService generates a new service that applies the given options to each
|
|
// request. These options are applied after the parent client's options (if there
|
|
// is one), and before any request-specific options.
|
|
func NewImageService(opts ...option.RequestOption) (r ImageService) {
|
|
r = ImageService{}
|
|
r.Options = opts
|
|
return
|
|
}
|
|
|
|
// Creates a variation of a given image. This endpoint only supports `dall-e-2`.
|
|
func (r *ImageService) NewVariation(ctx context.Context, body ImageNewVariationParams, opts ...option.RequestOption) (res *ImagesResponse, err error) {
|
|
opts = slices.Concat(r.Options, opts)
|
|
path := "images/variations"
|
|
err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...)
|
|
return res, err
|
|
}
|
|
|
|
// Creates an edited or extended image given one or more source images and a
|
|
// prompt. This endpoint supports GPT Image models (`gpt-image-1.5`, `gpt-image-1`,
|
|
// `gpt-image-1-mini`, and `chatgpt-image-latest`) and `dall-e-2`.
|
|
func (r *ImageService) Edit(ctx context.Context, body ImageEditParams, opts ...option.RequestOption) (res *ImagesResponse, err error) {
|
|
opts = slices.Concat(r.Options, opts)
|
|
path := "images/edits"
|
|
err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...)
|
|
return res, err
|
|
}
|
|
|
|
// Creates an edited or extended image given one or more source images and a
// prompt. This endpoint supports GPT Image models (`gpt-image-1.5`, `gpt-image-1`,
// `gpt-image-1-mini`, and `chatgpt-image-latest`) and `dall-e-2`.
func (r *ImageService) EditStreaming(ctx context.Context, body ImageEditParams, opts ...option.RequestOption) (stream *ssestream.Stream[ImageEditStreamEventUnion]) {
	var (
		raw *http.Response
		err error
	)
	opts = slices.Concat(r.Options, opts)
	// The stream flag is injected into the params rather than via WithJSONSet as in
	// GenerateStreaming; the value is the string "true", presumably because the edits
	// endpoint is form-encoded (note the mime/multipart and apiform imports), where
	// field values are strings — TODO(review): confirm against ImageEditParams.
	body.SetExtraFields(map[string]any{
		"stream": "true",
	})
	path := "images/edits"
	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &raw, opts...)
	// Any request error is carried into the stream; callers observe it via the
	// stream rather than a second return value.
	return ssestream.NewStream[ImageEditStreamEventUnion](ssestream.NewDecoder(raw), err)
}
|
|
|
|
// Creates an image given a prompt.
|
|
// [Learn more](https://platform.openai.com/docs/guides/images).
|
|
func (r *ImageService) Generate(ctx context.Context, body ImageGenerateParams, opts ...option.RequestOption) (res *ImagesResponse, err error) {
|
|
opts = slices.Concat(r.Options, opts)
|
|
path := "images/generations"
|
|
err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...)
|
|
return res, err
|
|
}
|
|
|
|
// Creates an image given a prompt.
|
|
// [Learn more](https://platform.openai.com/docs/guides/images).
|
|
func (r *ImageService) GenerateStreaming(ctx context.Context, body ImageGenerateParams, opts ...option.RequestOption) (stream *ssestream.Stream[ImageGenStreamEventUnion]) {
|
|
var (
|
|
raw *http.Response
|
|
err error
|
|
)
|
|
opts = slices.Concat(r.Options, opts)
|
|
opts = append(opts, option.WithJSONSet("stream", true))
|
|
path := "images/generations"
|
|
err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &raw, opts...)
|
|
return ssestream.NewStream[ImageGenStreamEventUnion](ssestream.NewDecoder(raw), err)
|
|
}
|
|
|
|
// Represents the content or the URL of an image generated by the OpenAI API.
type Image struct {
	// The base64-encoded JSON of the generated image. Returned by default for the GPT
	// image models, and only present if `response_format` is set to `b64_json` for
	// `dall-e-2` and `dall-e-3`.
	B64JSON string `json:"b64_json"`
	// For `dall-e-3` only, the revised prompt that was used to generate the image.
	RevisedPrompt string `json:"revised_prompt"`
	// When using `dall-e-2` or `dall-e-3`, the URL of the generated image if
	// `response_format` is set to `url` (default value). Unsupported for the GPT image
	// models.
	URL string `json:"url"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		B64JSON       respjson.Field
		RevisedPrompt respjson.Field
		URL           respjson.Field
		ExtraFields   map[string]respjson.Field
		raw           string // unmodified response payload, exposed via RawJSON
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r Image) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON decodes an API response into r, populating per-field metadata in
// r.JSON alongside the typed fields.
func (r *Image) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
|
|
|
|
// Emitted when image editing has completed and the final image is available.
type ImageEditCompletedEvent struct {
	// Base64-encoded final edited image data, suitable for rendering as an image.
	B64JSON string `json:"b64_json" api:"required"`
	// The background setting for the edited image.
	//
	// Any of "transparent", "opaque", "auto".
	Background ImageEditCompletedEventBackground `json:"background" api:"required"`
	// The Unix timestamp when the event was created.
	CreatedAt int64 `json:"created_at" api:"required"`
	// The output format for the edited image.
	//
	// Any of "png", "webp", "jpeg".
	OutputFormat ImageEditCompletedEventOutputFormat `json:"output_format" api:"required"`
	// The quality setting for the edited image.
	//
	// Any of "low", "medium", "high", "auto".
	Quality ImageEditCompletedEventQuality `json:"quality" api:"required"`
	// The size of the edited image.
	//
	// Any of "1024x1024", "1024x1536", "1536x1024", "auto".
	Size ImageEditCompletedEventSize `json:"size" api:"required"`
	// The type of the event. Always `image_edit.completed`.
	Type constant.ImageEditCompleted `json:"type" default:"image_edit.completed"`
	// For the GPT image models only, the token usage information for the image
	// generation.
	Usage ImageEditCompletedEventUsage `json:"usage" api:"required"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		B64JSON      respjson.Field
		Background   respjson.Field
		CreatedAt    respjson.Field
		OutputFormat respjson.Field
		Quality      respjson.Field
		Size         respjson.Field
		Type         respjson.Field
		Usage        respjson.Field
		ExtraFields  map[string]respjson.Field
		raw          string // unmodified response payload, exposed via RawJSON
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r ImageEditCompletedEvent) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON decodes an API response into r, populating per-field metadata in
// r.JSON alongside the typed fields.
func (r *ImageEditCompletedEvent) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
|
|
|
|
// The background setting for the edited image.
type ImageEditCompletedEventBackground string

// Accepted ImageEditCompletedEventBackground values.
const (
	ImageEditCompletedEventBackgroundTransparent ImageEditCompletedEventBackground = "transparent"
	ImageEditCompletedEventBackgroundOpaque      ImageEditCompletedEventBackground = "opaque"
	ImageEditCompletedEventBackgroundAuto        ImageEditCompletedEventBackground = "auto"
)

// The output format for the edited image.
type ImageEditCompletedEventOutputFormat string

// Accepted ImageEditCompletedEventOutputFormat values.
const (
	ImageEditCompletedEventOutputFormatPNG  ImageEditCompletedEventOutputFormat = "png"
	ImageEditCompletedEventOutputFormatWebP ImageEditCompletedEventOutputFormat = "webp"
	ImageEditCompletedEventOutputFormatJPEG ImageEditCompletedEventOutputFormat = "jpeg"
)

// The quality setting for the edited image.
type ImageEditCompletedEventQuality string

// Accepted ImageEditCompletedEventQuality values.
const (
	ImageEditCompletedEventQualityLow    ImageEditCompletedEventQuality = "low"
	ImageEditCompletedEventQualityMedium ImageEditCompletedEventQuality = "medium"
	ImageEditCompletedEventQualityHigh   ImageEditCompletedEventQuality = "high"
	ImageEditCompletedEventQualityAuto   ImageEditCompletedEventQuality = "auto"
)

// The size of the edited image.
type ImageEditCompletedEventSize string

// Accepted ImageEditCompletedEventSize values.
const (
	ImageEditCompletedEventSize1024x1024 ImageEditCompletedEventSize = "1024x1024"
	ImageEditCompletedEventSize1024x1536 ImageEditCompletedEventSize = "1024x1536"
	ImageEditCompletedEventSize1536x1024 ImageEditCompletedEventSize = "1536x1024"
	ImageEditCompletedEventSizeAuto      ImageEditCompletedEventSize = "auto"
)
|
|
|
|
// For the GPT image models only, the token usage information for the image
// generation.
type ImageEditCompletedEventUsage struct {
	// The number of tokens (images and text) in the input prompt.
	InputTokens int64 `json:"input_tokens" api:"required"`
	// The input tokens detailed information for the image generation.
	InputTokensDetails ImageEditCompletedEventUsageInputTokensDetails `json:"input_tokens_details" api:"required"`
	// The number of image tokens in the output image.
	OutputTokens int64 `json:"output_tokens" api:"required"`
	// The total number of tokens (images and text) used for the image generation.
	TotalTokens int64 `json:"total_tokens" api:"required"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		InputTokens        respjson.Field
		InputTokensDetails respjson.Field
		OutputTokens       respjson.Field
		TotalTokens        respjson.Field
		ExtraFields        map[string]respjson.Field
		raw                string // unmodified response payload, exposed via RawJSON
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r ImageEditCompletedEventUsage) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON decodes an API response into r, populating per-field metadata in
// r.JSON alongside the typed fields.
func (r *ImageEditCompletedEventUsage) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
|
|
|
|
// The input tokens detailed information for the image generation.
type ImageEditCompletedEventUsageInputTokensDetails struct {
	// The number of image tokens in the input prompt.
	ImageTokens int64 `json:"image_tokens" api:"required"`
	// The number of text tokens in the input prompt.
	TextTokens int64 `json:"text_tokens" api:"required"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		ImageTokens respjson.Field
		TextTokens  respjson.Field
		ExtraFields map[string]respjson.Field
		raw         string // unmodified response payload, exposed via RawJSON
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r ImageEditCompletedEventUsageInputTokensDetails) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON decodes an API response into r, populating per-field metadata in
// r.JSON alongside the typed fields.
func (r *ImageEditCompletedEventUsageInputTokensDetails) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
|
|
|
|
// Emitted when a partial image is available during image editing streaming.
type ImageEditPartialImageEvent struct {
	// Base64-encoded partial image data, suitable for rendering as an image.
	B64JSON string `json:"b64_json" api:"required"`
	// The background setting for the requested edited image.
	//
	// Any of "transparent", "opaque", "auto".
	Background ImageEditPartialImageEventBackground `json:"background" api:"required"`
	// The Unix timestamp when the event was created.
	CreatedAt int64 `json:"created_at" api:"required"`
	// The output format for the requested edited image.
	//
	// Any of "png", "webp", "jpeg".
	OutputFormat ImageEditPartialImageEventOutputFormat `json:"output_format" api:"required"`
	// 0-based index for the partial image (streaming).
	PartialImageIndex int64 `json:"partial_image_index" api:"required"`
	// The quality setting for the requested edited image.
	//
	// Any of "low", "medium", "high", "auto".
	Quality ImageEditPartialImageEventQuality `json:"quality" api:"required"`
	// The size of the requested edited image.
	//
	// Any of "1024x1024", "1024x1536", "1536x1024", "auto".
	Size ImageEditPartialImageEventSize `json:"size" api:"required"`
	// The type of the event. Always `image_edit.partial_image`.
	Type constant.ImageEditPartialImage `json:"type" default:"image_edit.partial_image"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		B64JSON           respjson.Field
		Background        respjson.Field
		CreatedAt         respjson.Field
		OutputFormat      respjson.Field
		PartialImageIndex respjson.Field
		Quality           respjson.Field
		Size              respjson.Field
		Type              respjson.Field
		ExtraFields       map[string]respjson.Field
		raw               string // unmodified response payload, exposed via RawJSON
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r ImageEditPartialImageEvent) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON decodes an API response into r, populating per-field metadata in
// r.JSON alongside the typed fields.
func (r *ImageEditPartialImageEvent) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
|
|
|
|
// The background setting for the requested edited image.
type ImageEditPartialImageEventBackground string

// Accepted ImageEditPartialImageEventBackground values.
const (
	ImageEditPartialImageEventBackgroundTransparent ImageEditPartialImageEventBackground = "transparent"
	ImageEditPartialImageEventBackgroundOpaque      ImageEditPartialImageEventBackground = "opaque"
	ImageEditPartialImageEventBackgroundAuto        ImageEditPartialImageEventBackground = "auto"
)

// The output format for the requested edited image.
type ImageEditPartialImageEventOutputFormat string

// Accepted ImageEditPartialImageEventOutputFormat values.
const (
	ImageEditPartialImageEventOutputFormatPNG  ImageEditPartialImageEventOutputFormat = "png"
	ImageEditPartialImageEventOutputFormatWebP ImageEditPartialImageEventOutputFormat = "webp"
	ImageEditPartialImageEventOutputFormatJPEG ImageEditPartialImageEventOutputFormat = "jpeg"
)

// The quality setting for the requested edited image.
type ImageEditPartialImageEventQuality string

// Accepted ImageEditPartialImageEventQuality values.
const (
	ImageEditPartialImageEventQualityLow    ImageEditPartialImageEventQuality = "low"
	ImageEditPartialImageEventQualityMedium ImageEditPartialImageEventQuality = "medium"
	ImageEditPartialImageEventQualityHigh   ImageEditPartialImageEventQuality = "high"
	ImageEditPartialImageEventQualityAuto   ImageEditPartialImageEventQuality = "auto"
)

// The size of the requested edited image.
type ImageEditPartialImageEventSize string

// Accepted ImageEditPartialImageEventSize values.
const (
	ImageEditPartialImageEventSize1024x1024 ImageEditPartialImageEventSize = "1024x1024"
	ImageEditPartialImageEventSize1024x1536 ImageEditPartialImageEventSize = "1024x1536"
	ImageEditPartialImageEventSize1536x1024 ImageEditPartialImageEventSize = "1536x1024"
	ImageEditPartialImageEventSizeAuto      ImageEditPartialImageEventSize = "auto"
)
|
|
|
|
// ImageEditStreamEventUnion contains all possible properties and values from
// [ImageEditPartialImageEvent], [ImageEditCompletedEvent].
//
// Use the [ImageEditStreamEventUnion.AsAny] method to switch on the variant.
//
// Use the methods beginning with 'As' to cast the union to one of its variants.
type ImageEditStreamEventUnion struct {
	B64JSON      string `json:"b64_json"`
	Background   string `json:"background"`
	CreatedAt    int64  `json:"created_at"`
	OutputFormat string `json:"output_format"`
	// This field is from variant [ImageEditPartialImageEvent].
	PartialImageIndex int64  `json:"partial_image_index"`
	Quality           string `json:"quality"`
	Size              string `json:"size"`
	// Any of "image_edit.partial_image", "image_edit.completed".
	Type string `json:"type"`
	// This field is from variant [ImageEditCompletedEvent].
	Usage ImageEditCompletedEventUsage `json:"usage"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		B64JSON           respjson.Field
		Background        respjson.Field
		CreatedAt         respjson.Field
		OutputFormat      respjson.Field
		PartialImageIndex respjson.Field
		Quality           respjson.Field
		Size              respjson.Field
		Type              respjson.Field
		Usage             respjson.Field
		raw               string // unmodified response payload, exposed via RawJSON
	} `json:"-"`
}

// anyImageEditStreamEvent is implemented by each variant of
// [ImageEditStreamEventUnion] to add type safety for the return type of
// [ImageEditStreamEventUnion.AsAny]
type anyImageEditStreamEvent interface {
	implImageEditStreamEventUnion()
}

func (ImageEditPartialImageEvent) implImageEditStreamEventUnion() {}
func (ImageEditCompletedEvent) implImageEditStreamEventUnion()    {}

// Use the following switch statement to find the correct variant
//
// switch variant := ImageEditStreamEventUnion.AsAny().(type) {
// case openai.ImageEditPartialImageEvent:
// case openai.ImageEditCompletedEvent:
// default:
//  fmt.Errorf("no variant present")
// }
func (u ImageEditStreamEventUnion) AsAny() anyImageEditStreamEvent {
	switch u.Type {
	case "image_edit.partial_image":
		return u.AsImageEditPartialImage()
	case "image_edit.completed":
		return u.AsImageEditCompleted()
	}
	// Unknown or absent event type: no variant is present.
	return nil
}

// AsImageEditPartialImage re-decodes the raw payload as an
// [ImageEditPartialImageEvent]. The decode error is intentionally discarded; on
// failure the zero value is returned.
func (u ImageEditStreamEventUnion) AsImageEditPartialImage() (v ImageEditPartialImageEvent) {
	apijson.UnmarshalRoot(json.RawMessage(u.JSON.raw), &v)
	return
}

// AsImageEditCompleted re-decodes the raw payload as an
// [ImageEditCompletedEvent]. The decode error is intentionally discarded; on
// failure the zero value is returned.
func (u ImageEditStreamEventUnion) AsImageEditCompleted() (v ImageEditCompletedEvent) {
	apijson.UnmarshalRoot(json.RawMessage(u.JSON.raw), &v)
	return
}

// Returns the unmodified JSON received from the API
func (u ImageEditStreamEventUnion) RawJSON() string { return u.JSON.raw }

// UnmarshalJSON decodes an API response into r, populating per-field metadata in
// r.JSON alongside the typed fields.
func (r *ImageEditStreamEventUnion) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
|
|
|
|
// Emitted when image generation has completed and the final image is available.
type ImageGenCompletedEvent struct {
	// Base64-encoded image data, suitable for rendering as an image.
	B64JSON string `json:"b64_json" api:"required"`
	// The background setting for the generated image.
	//
	// Any of "transparent", "opaque", "auto".
	Background ImageGenCompletedEventBackground `json:"background" api:"required"`
	// The Unix timestamp when the event was created.
	CreatedAt int64 `json:"created_at" api:"required"`
	// The output format for the generated image.
	//
	// Any of "png", "webp", "jpeg".
	OutputFormat ImageGenCompletedEventOutputFormat `json:"output_format" api:"required"`
	// The quality setting for the generated image.
	//
	// Any of "low", "medium", "high", "auto".
	Quality ImageGenCompletedEventQuality `json:"quality" api:"required"`
	// The size of the generated image.
	//
	// Any of "1024x1024", "1024x1536", "1536x1024", "auto".
	Size ImageGenCompletedEventSize `json:"size" api:"required"`
	// The type of the event. Always `image_generation.completed`.
	Type constant.ImageGenerationCompleted `json:"type" default:"image_generation.completed"`
	// For the GPT image models only, the token usage information for the image
	// generation.
	Usage ImageGenCompletedEventUsage `json:"usage" api:"required"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		B64JSON      respjson.Field
		Background   respjson.Field
		CreatedAt    respjson.Field
		OutputFormat respjson.Field
		Quality      respjson.Field
		Size         respjson.Field
		Type         respjson.Field
		Usage        respjson.Field
		ExtraFields  map[string]respjson.Field
		raw          string // unmodified response payload, exposed via RawJSON
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r ImageGenCompletedEvent) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON decodes an API response into r, populating per-field metadata in
// r.JSON alongside the typed fields.
func (r *ImageGenCompletedEvent) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
|
|
|
|
// The background setting for the generated image.
type ImageGenCompletedEventBackground string

// Accepted ImageGenCompletedEventBackground values.
const (
	ImageGenCompletedEventBackgroundTransparent ImageGenCompletedEventBackground = "transparent"
	ImageGenCompletedEventBackgroundOpaque      ImageGenCompletedEventBackground = "opaque"
	ImageGenCompletedEventBackgroundAuto        ImageGenCompletedEventBackground = "auto"
)

// The output format for the generated image.
type ImageGenCompletedEventOutputFormat string

// Accepted ImageGenCompletedEventOutputFormat values.
const (
	ImageGenCompletedEventOutputFormatPNG  ImageGenCompletedEventOutputFormat = "png"
	ImageGenCompletedEventOutputFormatWebP ImageGenCompletedEventOutputFormat = "webp"
	ImageGenCompletedEventOutputFormatJPEG ImageGenCompletedEventOutputFormat = "jpeg"
)

// The quality setting for the generated image.
type ImageGenCompletedEventQuality string

// Accepted ImageGenCompletedEventQuality values.
const (
	ImageGenCompletedEventQualityLow    ImageGenCompletedEventQuality = "low"
	ImageGenCompletedEventQualityMedium ImageGenCompletedEventQuality = "medium"
	ImageGenCompletedEventQualityHigh   ImageGenCompletedEventQuality = "high"
	ImageGenCompletedEventQualityAuto   ImageGenCompletedEventQuality = "auto"
)

// The size of the generated image.
type ImageGenCompletedEventSize string

// Accepted ImageGenCompletedEventSize values.
const (
	ImageGenCompletedEventSize1024x1024 ImageGenCompletedEventSize = "1024x1024"
	ImageGenCompletedEventSize1024x1536 ImageGenCompletedEventSize = "1024x1536"
	ImageGenCompletedEventSize1536x1024 ImageGenCompletedEventSize = "1536x1024"
	ImageGenCompletedEventSizeAuto      ImageGenCompletedEventSize = "auto"
)
|
|
|
|
// For the GPT image models only, the token usage information for the image
// generation.
type ImageGenCompletedEventUsage struct {
	// The number of tokens (images and text) in the input prompt.
	InputTokens int64 `json:"input_tokens" api:"required"`
	// The input tokens detailed information for the image generation.
	InputTokensDetails ImageGenCompletedEventUsageInputTokensDetails `json:"input_tokens_details" api:"required"`
	// The number of image tokens in the output image.
	OutputTokens int64 `json:"output_tokens" api:"required"`
	// The total number of tokens (images and text) used for the image generation.
	TotalTokens int64 `json:"total_tokens" api:"required"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		InputTokens        respjson.Field
		InputTokensDetails respjson.Field
		OutputTokens       respjson.Field
		TotalTokens        respjson.Field
		ExtraFields        map[string]respjson.Field
		raw                string // unmodified response payload, exposed via RawJSON
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r ImageGenCompletedEventUsage) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON decodes an API response into r, populating per-field metadata in
// r.JSON alongside the typed fields.
func (r *ImageGenCompletedEventUsage) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
|
|
|
|
// The input tokens detailed information for the image generation.
type ImageGenCompletedEventUsageInputTokensDetails struct {
	// The number of image tokens in the input prompt.
	ImageTokens int64 `json:"image_tokens" api:"required"`
	// The number of text tokens in the input prompt.
	TextTokens int64 `json:"text_tokens" api:"required"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		ImageTokens respjson.Field
		TextTokens  respjson.Field
		ExtraFields map[string]respjson.Field
		raw         string // unmodified response payload, exposed via RawJSON
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r ImageGenCompletedEventUsageInputTokensDetails) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON decodes an API response into r, populating per-field metadata in
// r.JSON alongside the typed fields.
func (r *ImageGenCompletedEventUsageInputTokensDetails) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
|
|
|
|
// Emitted when a partial image is available during image generation streaming.
type ImageGenPartialImageEvent struct {
	// Base64-encoded partial image data, suitable for rendering as an image.
	B64JSON string `json:"b64_json" api:"required"`
	// The background setting for the requested image.
	//
	// Any of "transparent", "opaque", "auto".
	Background ImageGenPartialImageEventBackground `json:"background" api:"required"`
	// The Unix timestamp when the event was created.
	CreatedAt int64 `json:"created_at" api:"required"`
	// The output format for the requested image.
	//
	// Any of "png", "webp", "jpeg".
	OutputFormat ImageGenPartialImageEventOutputFormat `json:"output_format" api:"required"`
	// 0-based index for the partial image (streaming).
	PartialImageIndex int64 `json:"partial_image_index" api:"required"`
	// The quality setting for the requested image.
	//
	// Any of "low", "medium", "high", "auto".
	Quality ImageGenPartialImageEventQuality `json:"quality" api:"required"`
	// The size of the requested image.
	//
	// Any of "1024x1024", "1024x1536", "1536x1024", "auto".
	Size ImageGenPartialImageEventSize `json:"size" api:"required"`
	// The type of the event. Always `image_generation.partial_image`.
	Type constant.ImageGenerationPartialImage `json:"type" default:"image_generation.partial_image"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		B64JSON           respjson.Field
		Background        respjson.Field
		CreatedAt         respjson.Field
		OutputFormat      respjson.Field
		PartialImageIndex respjson.Field
		Quality           respjson.Field
		Size              respjson.Field
		Type              respjson.Field
		ExtraFields       map[string]respjson.Field
		raw               string // unmodified response payload, exposed via RawJSON
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r ImageGenPartialImageEvent) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON decodes an API response into r, populating per-field metadata in
// r.JSON alongside the typed fields.
func (r *ImageGenPartialImageEvent) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
|
|
|
|
// The background setting for the requested image.
type ImageGenPartialImageEventBackground string

// Accepted ImageGenPartialImageEventBackground values.
const (
	ImageGenPartialImageEventBackgroundTransparent ImageGenPartialImageEventBackground = "transparent"
	ImageGenPartialImageEventBackgroundOpaque      ImageGenPartialImageEventBackground = "opaque"
	ImageGenPartialImageEventBackgroundAuto        ImageGenPartialImageEventBackground = "auto"
)

// The output format for the requested image.
type ImageGenPartialImageEventOutputFormat string

// Accepted ImageGenPartialImageEventOutputFormat values.
const (
	ImageGenPartialImageEventOutputFormatPNG  ImageGenPartialImageEventOutputFormat = "png"
	ImageGenPartialImageEventOutputFormatWebP ImageGenPartialImageEventOutputFormat = "webp"
	ImageGenPartialImageEventOutputFormatJPEG ImageGenPartialImageEventOutputFormat = "jpeg"
)

// The quality setting for the requested image.
type ImageGenPartialImageEventQuality string

// Accepted ImageGenPartialImageEventQuality values.
const (
	ImageGenPartialImageEventQualityLow    ImageGenPartialImageEventQuality = "low"
	ImageGenPartialImageEventQualityMedium ImageGenPartialImageEventQuality = "medium"
	ImageGenPartialImageEventQualityHigh   ImageGenPartialImageEventQuality = "high"
	ImageGenPartialImageEventQualityAuto   ImageGenPartialImageEventQuality = "auto"
)

// The size of the requested image.
type ImageGenPartialImageEventSize string

// Accepted ImageGenPartialImageEventSize values.
const (
	ImageGenPartialImageEventSize1024x1024 ImageGenPartialImageEventSize = "1024x1024"
	ImageGenPartialImageEventSize1024x1536 ImageGenPartialImageEventSize = "1024x1536"
	ImageGenPartialImageEventSize1536x1024 ImageGenPartialImageEventSize = "1536x1024"
	ImageGenPartialImageEventSizeAuto      ImageGenPartialImageEventSize = "auto"
)
|
|
|
|
// ImageGenStreamEventUnion contains all possible properties and values from
// [ImageGenPartialImageEvent], [ImageGenCompletedEvent].
//
// Use the [ImageGenStreamEventUnion.AsAny] method to switch on the variant.
//
// Use the methods beginning with 'As' to cast the union to one of its variants.
type ImageGenStreamEventUnion struct {
	B64JSON      string `json:"b64_json"`
	Background   string `json:"background"`
	CreatedAt    int64  `json:"created_at"`
	OutputFormat string `json:"output_format"`
	// This field is from variant [ImageGenPartialImageEvent].
	PartialImageIndex int64  `json:"partial_image_index"`
	Quality           string `json:"quality"`
	Size              string `json:"size"`
	// Any of "image_generation.partial_image", "image_generation.completed".
	Type string `json:"type"`
	// This field is from variant [ImageGenCompletedEvent].
	Usage ImageGenCompletedEventUsage `json:"usage"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		B64JSON           respjson.Field
		Background        respjson.Field
		CreatedAt         respjson.Field
		OutputFormat      respjson.Field
		PartialImageIndex respjson.Field
		Quality           respjson.Field
		Size              respjson.Field
		Type              respjson.Field
		Usage             respjson.Field
		raw               string // unmodified response payload, exposed via RawJSON
	} `json:"-"`
}

// anyImageGenStreamEvent is implemented by each variant of
// [ImageGenStreamEventUnion] to add type safety for the return type of
// [ImageGenStreamEventUnion.AsAny]
type anyImageGenStreamEvent interface {
	implImageGenStreamEventUnion()
}

func (ImageGenPartialImageEvent) implImageGenStreamEventUnion() {}
func (ImageGenCompletedEvent) implImageGenStreamEventUnion()    {}

// Use the following switch statement to find the correct variant
//
// switch variant := ImageGenStreamEventUnion.AsAny().(type) {
// case openai.ImageGenPartialImageEvent:
// case openai.ImageGenCompletedEvent:
// default:
//  fmt.Errorf("no variant present")
// }
func (u ImageGenStreamEventUnion) AsAny() anyImageGenStreamEvent {
	switch u.Type {
	case "image_generation.partial_image":
		return u.AsImageGenerationPartialImage()
	case "image_generation.completed":
		return u.AsImageGenerationCompleted()
	}
	// Unknown or absent event type: no variant is present.
	return nil
}

// AsImageGenerationPartialImage re-decodes the raw payload as an
// [ImageGenPartialImageEvent]. The decode error is intentionally discarded; on
// failure the zero value is returned.
func (u ImageGenStreamEventUnion) AsImageGenerationPartialImage() (v ImageGenPartialImageEvent) {
	apijson.UnmarshalRoot(json.RawMessage(u.JSON.raw), &v)
	return
}

// AsImageGenerationCompleted re-decodes the raw payload as an
// [ImageGenCompletedEvent]. The decode error is intentionally discarded; on
// failure the zero value is returned.
func (u ImageGenStreamEventUnion) AsImageGenerationCompleted() (v ImageGenCompletedEvent) {
	apijson.UnmarshalRoot(json.RawMessage(u.JSON.raw), &v)
	return
}

// Returns the unmodified JSON received from the API
func (u ImageGenStreamEventUnion) RawJSON() string { return u.JSON.raw }

// UnmarshalJSON decodes an API response into r, populating per-field metadata in
// r.JSON alongside the typed fields.
func (r *ImageGenStreamEventUnion) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
|
|
|
|
// ImageModel identifies an image model by name. It is an alias for string, so
// any model identifier may be supplied; the constants below name well-known
// models.
type ImageModel = string

const (
	ImageModelGPTImage1_5   ImageModel = "gpt-image-1.5"
	ImageModelDallE2        ImageModel = "dall-e-2"
	ImageModelDallE3        ImageModel = "dall-e-3"
	ImageModelGPTImage1     ImageModel = "gpt-image-1"
	ImageModelGPTImage1Mini ImageModel = "gpt-image-1-mini"
)
|
|
|
|
// The response from the image generation endpoint.
type ImagesResponse struct {
	// The Unix timestamp (in seconds) of when the image was created.
	Created int64 `json:"created" api:"required"`
	// The background parameter used for the image generation. Either `transparent` or
	// `opaque`.
	//
	// Any of "transparent", "opaque".
	Background ImagesResponseBackground `json:"background"`
	// The list of generated images.
	Data []Image `json:"data"`
	// The output format of the image generation. Either `png`, `webp`, or `jpeg`.
	//
	// Any of "png", "webp", "jpeg".
	OutputFormat ImagesResponseOutputFormat `json:"output_format"`
	// The quality of the image generated. Either `low`, `medium`, or `high`.
	//
	// Any of "low", "medium", "high".
	Quality ImagesResponseQuality `json:"quality"`
	// The size of the image generated. Either `1024x1024`, `1024x1536`, or
	// `1536x1024`.
	//
	// Any of "1024x1024", "1024x1536", "1536x1024".
	Size ImagesResponseSize `json:"size"`
	// For `gpt-image-1` only, the token usage information for the image generation.
	Usage ImagesResponseUsage `json:"usage"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		Created      respjson.Field
		Background   respjson.Field
		Data         respjson.Field
		OutputFormat respjson.Field
		Quality      respjson.Field
		Size         respjson.Field
		Usage        respjson.Field
		ExtraFields  map[string]respjson.Field
		raw          string // unmodified response payload, exposed via RawJSON
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r ImagesResponse) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON decodes an API response into r, populating per-field metadata in
// r.JSON alongside the typed fields.
func (r *ImagesResponse) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
|
|
|
|
// ImagesResponseBackground is the background parameter used for the image
// generation. Either `transparent` or `opaque`.
type ImagesResponseBackground string

// Allowed ImagesResponseBackground values.
const (
	ImagesResponseBackgroundTransparent ImagesResponseBackground = "transparent"
	ImagesResponseBackgroundOpaque      ImagesResponseBackground = "opaque"
)

// ImagesResponseOutputFormat is the output format of the image generation.
// Either `png`, `webp`, or `jpeg`.
type ImagesResponseOutputFormat string

// Allowed ImagesResponseOutputFormat values.
const (
	ImagesResponseOutputFormatPNG  ImagesResponseOutputFormat = "png"
	ImagesResponseOutputFormatWebP ImagesResponseOutputFormat = "webp"
	ImagesResponseOutputFormatJPEG ImagesResponseOutputFormat = "jpeg"
)

// ImagesResponseQuality is the quality of the image generated. Either `low`,
// `medium`, or `high`.
type ImagesResponseQuality string

// Allowed ImagesResponseQuality values.
const (
	ImagesResponseQualityLow    ImagesResponseQuality = "low"
	ImagesResponseQualityMedium ImagesResponseQuality = "medium"
	ImagesResponseQualityHigh   ImagesResponseQuality = "high"
)

// ImagesResponseSize is the size of the image generated. Either `1024x1024`,
// `1024x1536`, or `1536x1024`.
type ImagesResponseSize string

// Allowed ImagesResponseSize values.
const (
	ImagesResponseSize1024x1024 ImagesResponseSize = "1024x1024"
	ImagesResponseSize1024x1536 ImagesResponseSize = "1024x1536"
	ImagesResponseSize1536x1024 ImagesResponseSize = "1536x1024"
)
|
|
|
|
// ImagesResponseUsage holds, for `gpt-image-1` only, the token usage
// information for the image generation.
type ImagesResponseUsage struct {
	// The number of tokens (images and text) in the input prompt.
	InputTokens int64 `json:"input_tokens" api:"required"`
	// The input tokens detailed information for the image generation.
	InputTokensDetails ImagesResponseUsageInputTokensDetails `json:"input_tokens_details" api:"required"`
	// The number of output tokens generated by the model.
	OutputTokens int64 `json:"output_tokens" api:"required"`
	// The total number of tokens (images and text) used for the image generation.
	TotalTokens int64 `json:"total_tokens" api:"required"`
	// The output token details for the image generation.
	OutputTokensDetails ImagesResponseUsageOutputTokensDetails `json:"output_tokens_details"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		InputTokens         respjson.Field
		InputTokensDetails  respjson.Field
		OutputTokens        respjson.Field
		TotalTokens         respjson.Field
		OutputTokensDetails respjson.Field
		ExtraFields         map[string]respjson.Field
		raw                 string
	} `json:"-"`
}

// RawJSON returns the unmodified JSON received from the API.
func (r ImagesResponseUsage) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON implements [json.Unmarshaler] via the apijson decoder.
func (r *ImagesResponseUsage) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
|
|
|
|
// ImagesResponseUsageInputTokensDetails is the input tokens detailed
// information for the image generation.
type ImagesResponseUsageInputTokensDetails struct {
	// The number of image tokens in the input prompt.
	ImageTokens int64 `json:"image_tokens" api:"required"`
	// The number of text tokens in the input prompt.
	TextTokens int64 `json:"text_tokens" api:"required"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		ImageTokens respjson.Field
		TextTokens  respjson.Field
		ExtraFields map[string]respjson.Field
		raw         string
	} `json:"-"`
}

// RawJSON returns the unmodified JSON received from the API.
func (r ImagesResponseUsageInputTokensDetails) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON implements [json.Unmarshaler] via the apijson decoder.
func (r *ImagesResponseUsageInputTokensDetails) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
|
|
|
|
// ImagesResponseUsageOutputTokensDetails is the output token details for the
// image generation.
type ImagesResponseUsageOutputTokensDetails struct {
	// The number of image output tokens generated by the model.
	ImageTokens int64 `json:"image_tokens" api:"required"`
	// The number of text output tokens generated by the model.
	TextTokens int64 `json:"text_tokens" api:"required"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		ImageTokens respjson.Field
		TextTokens  respjson.Field
		ExtraFields map[string]respjson.Field
		raw         string
	} `json:"-"`
}

// RawJSON returns the unmodified JSON received from the API.
func (r ImagesResponseUsageOutputTokensDetails) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON implements [json.Unmarshaler] via the apijson decoder.
func (r *ImagesResponseUsageOutputTokensDetails) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
|
|
|
|
// ImageNewVariationParams are the request parameters for creating a variation
// of a given image; only `dall-e-2` supports this endpoint.
type ImageNewVariationParams struct {
	// The image to use as the basis for the variation(s). Must be a valid PNG file,
	// less than 4MB, and square.
	Image io.Reader `json:"image,omitzero" api:"required" format:"binary"`
	// The number of images to generate. Must be between 1 and 10.
	N param.Opt[int64] `json:"n,omitzero"`
	// A unique identifier representing your end-user, which can help OpenAI to monitor
	// and detect abuse.
	// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
	User param.Opt[string] `json:"user,omitzero"`
	// The model to use for image generation. Only `dall-e-2` is supported at this
	// time.
	Model ImageModel `json:"model,omitzero"`
	// The format in which the generated images are returned. Must be one of `url` or
	// `b64_json`. URLs are only valid for 60 minutes after the image has been
	// generated.
	//
	// Any of "url", "b64_json".
	ResponseFormat ImageNewVariationParamsResponseFormat `json:"response_format,omitzero"`
	// The size of the generated images. Must be one of `256x256`, `512x512`, or
	// `1024x1024`.
	//
	// Any of "256x256", "512x512", "1024x1024".
	Size ImageNewVariationParamsSize `json:"size,omitzero"`
	paramObj
}
|
|
|
|
func (r ImageNewVariationParams) MarshalMultipart() (data []byte, contentType string, err error) {
|
|
buf := bytes.NewBuffer(nil)
|
|
writer := multipart.NewWriter(buf)
|
|
err = apiform.MarshalRoot(r, writer)
|
|
if err == nil {
|
|
err = apiform.WriteExtras(writer, r.ExtraFields())
|
|
}
|
|
if err != nil {
|
|
writer.Close()
|
|
return nil, "", err
|
|
}
|
|
err = writer.Close()
|
|
if err != nil {
|
|
return nil, "", err
|
|
}
|
|
return buf.Bytes(), writer.FormDataContentType(), nil
|
|
}
|
|
|
|
// ImageNewVariationParamsResponseFormat is the format in which the generated
// images are returned. Must be one of `url` or `b64_json`. URLs are only valid
// for 60 minutes after the image has been generated.
type ImageNewVariationParamsResponseFormat string

// Allowed ImageNewVariationParamsResponseFormat values.
const (
	ImageNewVariationParamsResponseFormatURL     ImageNewVariationParamsResponseFormat = "url"
	ImageNewVariationParamsResponseFormatB64JSON ImageNewVariationParamsResponseFormat = "b64_json"
)

// ImageNewVariationParamsSize is the size of the generated images. Must be one
// of `256x256`, `512x512`, or `1024x1024`.
type ImageNewVariationParamsSize string

// Allowed ImageNewVariationParamsSize values.
const (
	ImageNewVariationParamsSize256x256   ImageNewVariationParamsSize = "256x256"
	ImageNewVariationParamsSize512x512   ImageNewVariationParamsSize = "512x512"
	ImageNewVariationParamsSize1024x1024 ImageNewVariationParamsSize = "1024x1024"
)
|
|
|
|
// ImageEditParams are the request parameters for the image edit endpoint.
type ImageEditParams struct {
	// The image(s) to edit. Must be a supported image file or an array of images.
	//
	// For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and
	// `gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than
	// 50MB. You can provide up to 16 images. `chatgpt-image-latest` follows the same
	// input constraints as GPT image models.
	//
	// For `dall-e-2`, you can only provide one image, and it should be a square `png`
	// file less than 4MB.
	Image ImageEditParamsImageUnion `json:"image,omitzero" api:"required" format:"binary"`
	// A text description of the desired image(s). The maximum length is 1000
	// characters for `dall-e-2`, and 32000 characters for the GPT image models.
	Prompt string `json:"prompt" api:"required"`
	// The number of images to generate. Must be between 1 and 10.
	N param.Opt[int64] `json:"n,omitzero"`
	// The compression level (0-100%) for the generated images. This parameter is only
	// supported for the GPT image models with the `webp` or `jpeg` output formats, and
	// defaults to 100.
	OutputCompression param.Opt[int64] `json:"output_compression,omitzero"`
	// The number of partial images to generate. This parameter is used for streaming
	// responses that return partial images. Value must be between 0 and 3. When set to
	// 0, the response will be a single image sent in one streaming event.
	//
	// Note that the final image may be sent before the full number of partial images
	// are generated if the full image is generated more quickly.
	PartialImages param.Opt[int64] `json:"partial_images,omitzero"`
	// A unique identifier representing your end-user, which can help OpenAI to monitor
	// and detect abuse.
	// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
	User param.Opt[string] `json:"user,omitzero"`
	// Allows to set transparency for the background of the generated image(s). This
	// parameter is only supported for the GPT image models. Must be one of
	// `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
	// model will automatically determine the best background for the image.
	//
	// If `transparent`, the output format needs to support transparency, so it should
	// be set to either `png` (default value) or `webp`.
	//
	// Any of "transparent", "opaque", "auto".
	Background ImageEditParamsBackground `json:"background,omitzero"`
	// Control how much effort the model will exert to match the style and features,
	// especially facial features, of input images. This parameter is only supported
	// for `gpt-image-1` and `gpt-image-1.5` and later models, unsupported for
	// `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
	//
	// Any of "high", "low".
	InputFidelity ImageEditParamsInputFidelity `json:"input_fidelity,omitzero"`
	// The model to use for image generation. Defaults to `gpt-image-1.5`.
	Model ImageModel `json:"model,omitzero"`
	// The format in which the generated images are returned. This parameter is only
	// supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The
	// default value is `png`.
	//
	// Any of "png", "jpeg", "webp".
	OutputFormat ImageEditParamsOutputFormat `json:"output_format,omitzero"`
	// The quality of the image that will be generated for GPT image models. Defaults
	// to `auto`.
	//
	// Any of "standard", "low", "medium", "high", "auto".
	Quality ImageEditParamsQuality `json:"quality,omitzero"`
	// The format in which the generated images are returned. Must be one of `url` or
	// `b64_json`. URLs are only valid for 60 minutes after the image has been
	// generated. This parameter is only supported for `dall-e-2` (default is `url` for
	// `dall-e-2`), as GPT image models always return base64-encoded images.
	//
	// Any of "url", "b64_json".
	ResponseFormat ImageEditParamsResponseFormat `json:"response_format,omitzero"`
	// The size of the generated images. Must be one of `1024x1024`, `1536x1024`
	// (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
	// models, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
	//
	// Any of "256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto".
	Size ImageEditParamsSize `json:"size,omitzero"`
	// An additional image whose fully transparent areas (e.g. where alpha is zero)
	// indicate where `image` should be edited. If there are multiple images provided,
	// the mask will be applied on the first image. Must be a valid PNG file, less than
	// 4MB, and have the same dimensions as `image`.
	Mask io.Reader `json:"mask,omitzero" format:"binary"`
	paramObj
}
|
|
|
|
func (r ImageEditParams) MarshalMultipart() (data []byte, contentType string, err error) {
|
|
buf := bytes.NewBuffer(nil)
|
|
writer := multipart.NewWriter(buf)
|
|
err = apiform.MarshalRoot(r, writer)
|
|
if err == nil {
|
|
err = apiform.WriteExtras(writer, r.ExtraFields())
|
|
}
|
|
if err != nil {
|
|
writer.Close()
|
|
return nil, "", err
|
|
}
|
|
err = writer.Close()
|
|
if err != nil {
|
|
return nil, "", err
|
|
}
|
|
return buf.Bytes(), writer.FormDataContentType(), nil
|
|
}
|
|
|
|
// ImageEditParamsImageUnion accepts either a single file or an array of files
// for the `image` parameter. Only one field can be non-zero.
//
// Use [param.IsOmitted] to confirm if a field is set.
type ImageEditParamsImageUnion struct {
	OfFile      io.Reader   `json:",omitzero,inline"`
	OfFileArray []io.Reader `json:",omitzero,inline"`
	paramUnion
}

// MarshalJSON implements [json.Marshaler] via param.MarshalUnion.
func (u ImageEditParamsImageUnion) MarshalJSON() ([]byte, error) {
	return param.MarshalUnion(u, u.OfFile, u.OfFileArray)
}

// UnmarshalJSON implements [json.Unmarshaler] via the apijson decoder.
func (u *ImageEditParamsImageUnion) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, u)
}
|
|
|
|
func (u *ImageEditParamsImageUnion) asAny() any {
|
|
if !param.IsOmitted(u.OfFile) {
|
|
return &u.OfFile
|
|
} else if !param.IsOmitted(u.OfFileArray) {
|
|
return &u.OfFileArray
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// ImageEditParamsBackground allows to set transparency for the background of
// the generated image(s). This parameter is only supported for the GPT image
// models. Must be one of `transparent`, `opaque` or `auto` (default value).
// When `auto` is used, the model will automatically determine the best
// background for the image.
//
// If `transparent`, the output format needs to support transparency, so it
// should be set to either `png` (default value) or `webp`.
type ImageEditParamsBackground string

// Allowed ImageEditParamsBackground values.
const (
	ImageEditParamsBackgroundTransparent ImageEditParamsBackground = "transparent"
	ImageEditParamsBackgroundOpaque      ImageEditParamsBackground = "opaque"
	ImageEditParamsBackgroundAuto        ImageEditParamsBackground = "auto"
)

// ImageEditParamsInputFidelity controls how much effort the model will exert
// to match the style and features, especially facial features, of input
// images. This parameter is only supported for `gpt-image-1` and
// `gpt-image-1.5` and later models, unsupported for `gpt-image-1-mini`.
// Supports `high` and `low`. Defaults to `low`.
type ImageEditParamsInputFidelity string

// Allowed ImageEditParamsInputFidelity values.
const (
	ImageEditParamsInputFidelityHigh ImageEditParamsInputFidelity = "high"
	ImageEditParamsInputFidelityLow  ImageEditParamsInputFidelity = "low"
)

// ImageEditParamsOutputFormat is the format in which the generated images are
// returned. This parameter is only supported for the GPT image models. Must be
// one of `png`, `jpeg`, or `webp`. The default value is `png`.
type ImageEditParamsOutputFormat string

// Allowed ImageEditParamsOutputFormat values.
const (
	ImageEditParamsOutputFormatPNG  ImageEditParamsOutputFormat = "png"
	ImageEditParamsOutputFormatJPEG ImageEditParamsOutputFormat = "jpeg"
	ImageEditParamsOutputFormatWebP ImageEditParamsOutputFormat = "webp"
)

// ImageEditParamsQuality is the quality of the image that will be generated
// for GPT image models. Defaults to `auto`.
type ImageEditParamsQuality string

// Allowed ImageEditParamsQuality values.
const (
	ImageEditParamsQualityStandard ImageEditParamsQuality = "standard"
	ImageEditParamsQualityLow      ImageEditParamsQuality = "low"
	ImageEditParamsQualityMedium   ImageEditParamsQuality = "medium"
	ImageEditParamsQualityHigh     ImageEditParamsQuality = "high"
	ImageEditParamsQualityAuto     ImageEditParamsQuality = "auto"
)

// ImageEditParamsResponseFormat is the format in which the generated images
// are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60
// minutes after the image has been generated. This parameter is only supported
// for `dall-e-2` (default is `url` for `dall-e-2`), as GPT image models always
// return base64-encoded images.
type ImageEditParamsResponseFormat string

// Allowed ImageEditParamsResponseFormat values.
const (
	ImageEditParamsResponseFormatURL     ImageEditParamsResponseFormat = "url"
	ImageEditParamsResponseFormatB64JSON ImageEditParamsResponseFormat = "b64_json"
)

// ImageEditParamsSize is the size of the generated images. Must be one of
// `1024x1024`, `1536x1024` (landscape), `1024x1536` (portrait), or `auto`
// (default value) for the GPT image models, and one of `256x256`, `512x512`,
// or `1024x1024` for `dall-e-2`.
type ImageEditParamsSize string

// Allowed ImageEditParamsSize values.
const (
	ImageEditParamsSize256x256   ImageEditParamsSize = "256x256"
	ImageEditParamsSize512x512   ImageEditParamsSize = "512x512"
	ImageEditParamsSize1024x1024 ImageEditParamsSize = "1024x1024"
	ImageEditParamsSize1536x1024 ImageEditParamsSize = "1536x1024"
	ImageEditParamsSize1024x1536 ImageEditParamsSize = "1024x1536"
	ImageEditParamsSizeAuto      ImageEditParamsSize = "auto"
)
|
|
|
|
// ImageGenerateParams are the request parameters for the image generation
// endpoint.
type ImageGenerateParams struct {
	// A text description of the desired image(s). The maximum length is 32000
	// characters for the GPT image models, 1000 characters for `dall-e-2` and 4000
	// characters for `dall-e-3`.
	Prompt string `json:"prompt" api:"required"`
	// The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
	// `n=1` is supported.
	N param.Opt[int64] `json:"n,omitzero"`
	// The compression level (0-100%) for the generated images. This parameter is only
	// supported for the GPT image models with the `webp` or `jpeg` output formats, and
	// defaults to 100.
	OutputCompression param.Opt[int64] `json:"output_compression,omitzero"`
	// The number of partial images to generate. This parameter is used for streaming
	// responses that return partial images. Value must be between 0 and 3. When set to
	// 0, the response will be a single image sent in one streaming event.
	//
	// Note that the final image may be sent before the full number of partial images
	// are generated if the full image is generated more quickly.
	PartialImages param.Opt[int64] `json:"partial_images,omitzero"`
	// A unique identifier representing your end-user, which can help OpenAI to monitor
	// and detect abuse.
	// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
	User param.Opt[string] `json:"user,omitzero"`
	// Allows to set transparency for the background of the generated image(s). This
	// parameter is only supported for the GPT image models. Must be one of
	// `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
	// model will automatically determine the best background for the image.
	//
	// If `transparent`, the output format needs to support transparency, so it should
	// be set to either `png` (default value) or `webp`.
	//
	// Any of "transparent", "opaque", "auto".
	Background ImageGenerateParamsBackground `json:"background,omitzero"`
	// The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT
	// image model (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to
	// `dall-e-2` unless a parameter specific to the GPT image models is used.
	Model ImageModel `json:"model,omitzero"`
	// Control the content-moderation level for images generated by the GPT image
	// models. Must be either `low` for less restrictive filtering or `auto` (default
	// value).
	//
	// Any of "low", "auto".
	Moderation ImageGenerateParamsModeration `json:"moderation,omitzero"`
	// The format in which the generated images are returned. This parameter is only
	// supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`.
	//
	// Any of "png", "jpeg", "webp".
	OutputFormat ImageGenerateParamsOutputFormat `json:"output_format,omitzero"`
	// The quality of the image that will be generated.
	//
	// - `auto` (default value) will automatically select the best quality for the
	//   given model.
	// - `high`, `medium` and `low` are supported for the GPT image models.
	// - `hd` and `standard` are supported for `dall-e-3`.
	// - `standard` is the only option for `dall-e-2`.
	//
	// Any of "standard", "hd", "low", "medium", "high", "auto".
	Quality ImageGenerateParamsQuality `json:"quality,omitzero"`
	// The format in which generated images with `dall-e-2` and `dall-e-3` are
	// returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
	// after the image has been generated. This parameter isn't supported for the GPT
	// image models, which always return base64-encoded images.
	//
	// Any of "url", "b64_json".
	ResponseFormat ImageGenerateParamsResponseFormat `json:"response_format,omitzero"`
	// The size of the generated images. Must be one of `1024x1024`, `1536x1024`
	// (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
	// models, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of
	// `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
	//
	// Any of "auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512",
	// "1792x1024", "1024x1792".
	Size ImageGenerateParamsSize `json:"size,omitzero"`
	// The style of the generated images. This parameter is only supported for
	// `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
	// towards generating hyper-real and dramatic images. Natural causes the model to
	// produce more natural, less hyper-real looking images.
	//
	// Any of "vivid", "natural".
	Style ImageGenerateParamsStyle `json:"style,omitzero"`
	paramObj
}
|
|
|
|
func (r ImageGenerateParams) MarshalJSON() (data []byte, err error) {
|
|
type shadow ImageGenerateParams
|
|
return param.MarshalObject(r, (*shadow)(&r))
|
|
}
|
|
func (r *ImageGenerateParams) UnmarshalJSON(data []byte) error {
|
|
return apijson.UnmarshalRoot(data, r)
|
|
}
|
|
|
|
// ImageGenerateParamsBackground allows to set transparency for the background
// of the generated image(s). This parameter is only supported for the GPT
// image models. Must be one of `transparent`, `opaque` or `auto` (default
// value). When `auto` is used, the model will automatically determine the best
// background for the image.
//
// If `transparent`, the output format needs to support transparency, so it
// should be set to either `png` (default value) or `webp`.
type ImageGenerateParamsBackground string

// Allowed ImageGenerateParamsBackground values.
const (
	ImageGenerateParamsBackgroundTransparent ImageGenerateParamsBackground = "transparent"
	ImageGenerateParamsBackgroundOpaque      ImageGenerateParamsBackground = "opaque"
	ImageGenerateParamsBackgroundAuto        ImageGenerateParamsBackground = "auto"
)

// ImageGenerateParamsModeration controls the content-moderation level for
// images generated by the GPT image models. Must be either `low` for less
// restrictive filtering or `auto` (default value).
type ImageGenerateParamsModeration string

// Allowed ImageGenerateParamsModeration values.
const (
	ImageGenerateParamsModerationLow  ImageGenerateParamsModeration = "low"
	ImageGenerateParamsModerationAuto ImageGenerateParamsModeration = "auto"
)

// ImageGenerateParamsOutputFormat is the format in which the generated images
// are returned. This parameter is only supported for the GPT image models.
// Must be one of `png`, `jpeg`, or `webp`.
type ImageGenerateParamsOutputFormat string

// Allowed ImageGenerateParamsOutputFormat values.
const (
	ImageGenerateParamsOutputFormatPNG  ImageGenerateParamsOutputFormat = "png"
	ImageGenerateParamsOutputFormatJPEG ImageGenerateParamsOutputFormat = "jpeg"
	ImageGenerateParamsOutputFormatWebP ImageGenerateParamsOutputFormat = "webp"
)

// ImageGenerateParamsQuality is the quality of the image that will be
// generated.
//
// - `auto` (default value) will automatically select the best quality for the
//   given model.
// - `high`, `medium` and `low` are supported for the GPT image models.
// - `hd` and `standard` are supported for `dall-e-3`.
// - `standard` is the only option for `dall-e-2`.
type ImageGenerateParamsQuality string

// Allowed ImageGenerateParamsQuality values.
const (
	ImageGenerateParamsQualityStandard ImageGenerateParamsQuality = "standard"
	ImageGenerateParamsQualityHD       ImageGenerateParamsQuality = "hd"
	ImageGenerateParamsQualityLow      ImageGenerateParamsQuality = "low"
	ImageGenerateParamsQualityMedium   ImageGenerateParamsQuality = "medium"
	ImageGenerateParamsQualityHigh     ImageGenerateParamsQuality = "high"
	ImageGenerateParamsQualityAuto     ImageGenerateParamsQuality = "auto"
)

// ImageGenerateParamsResponseFormat is the format in which generated images
// with `dall-e-2` and `dall-e-3` are returned. Must be one of `url` or
// `b64_json`. URLs are only valid for 60 minutes after the image has been
// generated. This parameter isn't supported for the GPT image models, which
// always return base64-encoded images.
type ImageGenerateParamsResponseFormat string

// Allowed ImageGenerateParamsResponseFormat values.
const (
	ImageGenerateParamsResponseFormatURL     ImageGenerateParamsResponseFormat = "url"
	ImageGenerateParamsResponseFormatB64JSON ImageGenerateParamsResponseFormat = "b64_json"
)

// ImageGenerateParamsSize is the size of the generated images. Must be one of
// `1024x1024`, `1536x1024` (landscape), `1024x1536` (portrait), or `auto`
// (default value) for the GPT image models, one of `256x256`, `512x512`, or
// `1024x1024` for `dall-e-2`, and one of `1024x1024`, `1792x1024`, or
// `1024x1792` for `dall-e-3`.
type ImageGenerateParamsSize string

// Allowed ImageGenerateParamsSize values.
const (
	ImageGenerateParamsSizeAuto      ImageGenerateParamsSize = "auto"
	ImageGenerateParamsSize1024x1024 ImageGenerateParamsSize = "1024x1024"
	ImageGenerateParamsSize1536x1024 ImageGenerateParamsSize = "1536x1024"
	ImageGenerateParamsSize1024x1536 ImageGenerateParamsSize = "1024x1536"
	ImageGenerateParamsSize256x256   ImageGenerateParamsSize = "256x256"
	ImageGenerateParamsSize512x512   ImageGenerateParamsSize = "512x512"
	ImageGenerateParamsSize1792x1024 ImageGenerateParamsSize = "1792x1024"
	ImageGenerateParamsSize1024x1792 ImageGenerateParamsSize = "1024x1792"
)

// ImageGenerateParamsStyle is the style of the generated images. This
// parameter is only supported for `dall-e-3`. Must be one of `vivid` or
// `natural`. Vivid causes the model to lean towards generating hyper-real and
// dramatic images. Natural causes the model to produce more natural, less
// hyper-real looking images.
type ImageGenerateParamsStyle string

// Allowed ImageGenerateParamsStyle values.
const (
	ImageGenerateParamsStyleVivid   ImageGenerateParamsStyle = "vivid"
	ImageGenerateParamsStyleNatural ImageGenerateParamsStyle = "natural"
)
|