// Mirror of https://github.com/openai/openai-go.git
// Synced 2026-04-01 00:57:11 +09:00 (560 lines, 24 KiB, Go)
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
|
|
package openai
|
|
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
"io"
|
|
"mime/multipart"
|
|
"net/http"
|
|
|
|
"github.com/openai/openai-go/internal/apiform"
|
|
"github.com/openai/openai-go/internal/apijson"
|
|
"github.com/openai/openai-go/internal/requestconfig"
|
|
"github.com/openai/openai-go/option"
|
|
"github.com/openai/openai-go/packages/param"
|
|
"github.com/openai/openai-go/packages/respjson"
|
|
)
|
|
|
|
// ImageService contains methods and other services that help with interacting with
// the openai API.
//
// Note, unlike clients, this service does not read variables from the environment
// automatically. You should not instantiate this service directly, and instead use
// the [NewImageService] method instead.
type ImageService struct {
	// Options are the request options applied to every request made through this
	// service; per-call options passed to a method are appended after these.
	Options []option.RequestOption
}
|
|
|
|
// NewImageService generates a new service that applies the given options to each
|
|
// request. These options are applied after the parent client's options (if there
|
|
// is one), and before any request-specific options.
|
|
func NewImageService(opts ...option.RequestOption) (r ImageService) {
|
|
r = ImageService{}
|
|
r.Options = opts
|
|
return
|
|
}
|
|
|
|
// Creates a variation of a given image. This endpoint only supports `dall-e-2`.
|
|
func (r *ImageService) NewVariation(ctx context.Context, body ImageNewVariationParams, opts ...option.RequestOption) (res *ImagesResponse, err error) {
|
|
opts = append(r.Options[:], opts...)
|
|
path := "images/variations"
|
|
err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...)
|
|
return
|
|
}
|
|
|
|
// Creates an edited or extended image given one or more source images and a
|
|
// prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`.
|
|
func (r *ImageService) Edit(ctx context.Context, body ImageEditParams, opts ...option.RequestOption) (res *ImagesResponse, err error) {
|
|
opts = append(r.Options[:], opts...)
|
|
path := "images/edits"
|
|
err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...)
|
|
return
|
|
}
|
|
|
|
// Creates an image given a prompt.
|
|
// [Learn more](https://platform.openai.com/docs/guides/images).
|
|
func (r *ImageService) Generate(ctx context.Context, body ImageGenerateParams, opts ...option.RequestOption) (res *ImagesResponse, err error) {
|
|
opts = append(r.Options[:], opts...)
|
|
path := "images/generations"
|
|
err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...)
|
|
return
|
|
}
|
|
|
|
// Represents the content or the URL of an image generated by the OpenAI API.
type Image struct {
	// The base64-encoded JSON of the generated image. Default value for `gpt-image-1`,
	// and only present if `response_format` is set to `b64_json` for `dall-e-2` and
	// `dall-e-3`.
	B64JSON string `json:"b64_json"`
	// For `dall-e-3` only, the revised prompt that was used to generate the image.
	RevisedPrompt string `json:"revised_prompt"`
	// When using `dall-e-2` or `dall-e-3`, the URL of the generated image if
	// `response_format` is set to `url` (default value). Unsupported for
	// `gpt-image-1`.
	URL string `json:"url"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		B64JSON       respjson.Field
		RevisedPrompt respjson.Field
		URL           respjson.Field
		// ExtraFields holds properties not declared on this struct.
		ExtraFields map[string]respjson.Field
		// raw is the unmodified JSON payload; exposed via RawJSON.
		raw string
	} `json:"-"`
}
|
|
|
|
// Returns the unmodified JSON received from the API
func (r Image) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON deserializes the API payload into the struct and records
// per-field metadata in r.JSON.
func (r *Image) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
|
|
|
|
// ImageModel identifies which model services an image request. It is a string
// alias, so arbitrary model names are also accepted.
type ImageModel = string

// Known image model identifiers.
const (
	ImageModelDallE2    ImageModel = "dall-e-2"
	ImageModelDallE3    ImageModel = "dall-e-3"
	ImageModelGPTImage1 ImageModel = "gpt-image-1"
)
|
|
|
|
// The response from the image generation endpoint.
type ImagesResponse struct {
	// The Unix timestamp (in seconds) of when the image was created.
	Created int64 `json:"created,required"`
	// The list of generated images.
	Data []Image `json:"data"`
	// For `gpt-image-1` only, the token usage information for the image generation.
	Usage ImagesResponseUsage `json:"usage"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		Created respjson.Field
		Data    respjson.Field
		Usage   respjson.Field
		// ExtraFields holds properties not declared on this struct.
		ExtraFields map[string]respjson.Field
		// raw is the unmodified JSON payload; exposed via RawJSON.
		raw string
	} `json:"-"`
}
|
|
|
|
// Returns the unmodified JSON received from the API
func (r ImagesResponse) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON deserializes the API payload into the struct and records
// per-field metadata in r.JSON.
func (r *ImagesResponse) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
|
|
|
|
// For `gpt-image-1` only, the token usage information for the image generation.
type ImagesResponseUsage struct {
	// The number of tokens (images and text) in the input prompt.
	InputTokens int64 `json:"input_tokens,required"`
	// The input tokens detailed information for the image generation.
	InputTokensDetails ImagesResponseUsageInputTokensDetails `json:"input_tokens_details,required"`
	// The number of image tokens in the output image.
	OutputTokens int64 `json:"output_tokens,required"`
	// The total number of tokens (images and text) used for the image generation.
	TotalTokens int64 `json:"total_tokens,required"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		InputTokens        respjson.Field
		InputTokensDetails respjson.Field
		OutputTokens       respjson.Field
		TotalTokens        respjson.Field
		// ExtraFields holds properties not declared on this struct.
		ExtraFields map[string]respjson.Field
		// raw is the unmodified JSON payload; exposed via RawJSON.
		raw string
	} `json:"-"`
}
|
|
|
|
// Returns the unmodified JSON received from the API
func (r ImagesResponseUsage) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON deserializes the API payload into the struct and records
// per-field metadata in r.JSON.
func (r *ImagesResponseUsage) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
|
|
|
|
// The input tokens detailed information for the image generation.
type ImagesResponseUsageInputTokensDetails struct {
	// The number of image tokens in the input prompt.
	ImageTokens int64 `json:"image_tokens,required"`
	// The number of text tokens in the input prompt.
	TextTokens int64 `json:"text_tokens,required"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		ImageTokens respjson.Field
		TextTokens  respjson.Field
		// ExtraFields holds properties not declared on this struct.
		ExtraFields map[string]respjson.Field
		// raw is the unmodified JSON payload; exposed via RawJSON.
		raw string
	} `json:"-"`
}
|
|
|
|
// Returns the unmodified JSON received from the API
func (r ImagesResponseUsageInputTokensDetails) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON deserializes the API payload into the struct and records
// per-field metadata in r.JSON.
func (r *ImagesResponseUsageInputTokensDetails) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
|
|
|
|
// ImageNewVariationParams are the request parameters for [ImageService.NewVariation].
// They are encoded as a multipart form via MarshalMultipart.
type ImageNewVariationParams struct {
	// The image to use as the basis for the variation(s). Must be a valid PNG file,
	// less than 4MB, and square.
	Image io.Reader `json:"image,omitzero,required" format:"binary"`
	// The number of images to generate. Must be between 1 and 10.
	N param.Opt[int64] `json:"n,omitzero"`
	// A unique identifier representing your end-user, which can help OpenAI to monitor
	// and detect abuse.
	// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
	User param.Opt[string] `json:"user,omitzero"`
	// The model to use for image generation. Only `dall-e-2` is supported at this
	// time.
	Model ImageModel `json:"model,omitzero"`
	// The format in which the generated images are returned. Must be one of `url` or
	// `b64_json`. URLs are only valid for 60 minutes after the image has been
	// generated.
	//
	// Any of "url", "b64_json".
	ResponseFormat ImageNewVariationParamsResponseFormat `json:"response_format,omitzero"`
	// The size of the generated images. Must be one of `256x256`, `512x512`, or
	// `1024x1024`.
	//
	// Any of "256x256", "512x512", "1024x1024".
	Size ImageNewVariationParamsSize `json:"size,omitzero"`
	paramObj
}
|
|
|
|
func (r ImageNewVariationParams) MarshalMultipart() (data []byte, contentType string, err error) {
|
|
buf := bytes.NewBuffer(nil)
|
|
writer := multipart.NewWriter(buf)
|
|
err = apiform.MarshalRoot(r, writer)
|
|
if err != nil {
|
|
writer.Close()
|
|
return nil, "", err
|
|
}
|
|
err = writer.Close()
|
|
if err != nil {
|
|
return nil, "", err
|
|
}
|
|
return buf.Bytes(), writer.FormDataContentType(), nil
|
|
}
|
|
|
|
// The format in which the generated images are returned. Must be one of `url` or
// `b64_json`. URLs are only valid for 60 minutes after the image has been
// generated.
type ImageNewVariationParamsResponseFormat string

// Allowed values for [ImageNewVariationParamsResponseFormat].
const (
	ImageNewVariationParamsResponseFormatURL     ImageNewVariationParamsResponseFormat = "url"
	ImageNewVariationParamsResponseFormatB64JSON ImageNewVariationParamsResponseFormat = "b64_json"
)
|
|
|
|
// The size of the generated images. Must be one of `256x256`, `512x512`, or
// `1024x1024`.
type ImageNewVariationParamsSize string

// Allowed values for [ImageNewVariationParamsSize].
const (
	ImageNewVariationParamsSize256x256   ImageNewVariationParamsSize = "256x256"
	ImageNewVariationParamsSize512x512   ImageNewVariationParamsSize = "512x512"
	ImageNewVariationParamsSize1024x1024 ImageNewVariationParamsSize = "1024x1024"
)
|
|
|
|
// ImageEditParams are the request parameters for [ImageService.Edit].
// They are encoded as a multipart form via MarshalMultipart.
type ImageEditParams struct {
	// The image(s) to edit. Must be a supported image file or an array of images.
	//
	// For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
	// 25MB. You can provide up to 16 images.
	//
	// For `dall-e-2`, you can only provide one image, and it should be a square `png`
	// file less than 4MB.
	Image ImageEditParamsImageUnion `json:"image,omitzero,required" format:"binary"`
	// A text description of the desired image(s). The maximum length is 1000
	// characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
	Prompt string `json:"prompt,required"`
	// The number of images to generate. Must be between 1 and 10.
	N param.Opt[int64] `json:"n,omitzero"`
	// A unique identifier representing your end-user, which can help OpenAI to monitor
	// and detect abuse.
	// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
	User param.Opt[string] `json:"user,omitzero"`
	// Allows to set transparency for the background of the generated image(s). This
	// parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
	// `opaque` or `auto` (default value). When `auto` is used, the model will
	// automatically determine the best background for the image.
	//
	// If `transparent`, the output format needs to support transparency, so it should
	// be set to either `png` (default value) or `webp`.
	//
	// Any of "transparent", "opaque", "auto".
	Background ImageEditParamsBackground `json:"background,omitzero"`
	// The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
	// supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
	// is used.
	Model ImageModel `json:"model,omitzero"`
	// The quality of the image that will be generated. `high`, `medium` and `low` are
	// only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
	// Defaults to `auto`.
	//
	// Any of "standard", "low", "medium", "high", "auto".
	Quality ImageEditParamsQuality `json:"quality,omitzero"`
	// The format in which the generated images are returned. Must be one of `url` or
	// `b64_json`. URLs are only valid for 60 minutes after the image has been
	// generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
	// will always return base64-encoded images.
	//
	// Any of "url", "b64_json".
	ResponseFormat ImageEditParamsResponseFormat `json:"response_format,omitzero"`
	// The size of the generated images. Must be one of `1024x1024`, `1536x1024`
	// (landscape), `1024x1536` (portrait), or `auto` (default value) for
	// `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
	//
	// Any of "256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto".
	Size ImageEditParamsSize `json:"size,omitzero"`
	// An additional image whose fully transparent areas (e.g. where alpha is zero)
	// indicate where `image` should be edited. If there are multiple images provided,
	// the mask will be applied on the first image. Must be a valid PNG file, less than
	// 4MB, and have the same dimensions as `image`.
	Mask io.Reader `json:"mask,omitzero" format:"binary"`
	paramObj
}
|
|
|
|
func (r ImageEditParams) MarshalMultipart() (data []byte, contentType string, err error) {
|
|
buf := bytes.NewBuffer(nil)
|
|
writer := multipart.NewWriter(buf)
|
|
err = apiform.MarshalRoot(r, writer)
|
|
if err != nil {
|
|
writer.Close()
|
|
return nil, "", err
|
|
}
|
|
err = writer.Close()
|
|
if err != nil {
|
|
return nil, "", err
|
|
}
|
|
return buf.Bytes(), writer.FormDataContentType(), nil
|
|
}
|
|
|
|
// Only one field can be non-zero.
//
// Use [param.IsOmitted] to confirm if a field is set.
type ImageEditParamsImageUnion struct {
	// OfFile holds a single image file.
	OfFile io.Reader `json:",omitzero,inline"`
	// OfBinaryArray holds multiple image files.
	OfBinaryArray []io.Reader `json:",omitzero,inline"`
	paramUnion
}
|
|
|
|
// MarshalJSON serializes whichever union variant is set.
func (u ImageEditParamsImageUnion) MarshalJSON() ([]byte, error) {
	return param.MarshalUnion[ImageEditParamsImageUnion](u.OfFile, u.OfBinaryArray)
}

// UnmarshalJSON deserializes JSON data into the union.
func (u *ImageEditParamsImageUnion) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, u)
}
|
|
|
|
func (u *ImageEditParamsImageUnion) asAny() any {
|
|
if !param.IsOmitted(u.OfFile) {
|
|
return &u.OfFile
|
|
} else if !param.IsOmitted(u.OfBinaryArray) {
|
|
return &u.OfBinaryArray
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// Allows to set transparency for the background of the generated image(s). This
// parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
// `opaque` or `auto` (default value). When `auto` is used, the model will
// automatically determine the best background for the image.
//
// If `transparent`, the output format needs to support transparency, so it should
// be set to either `png` (default value) or `webp`.
type ImageEditParamsBackground string

// Allowed values for [ImageEditParamsBackground].
const (
	ImageEditParamsBackgroundTransparent ImageEditParamsBackground = "transparent"
	ImageEditParamsBackgroundOpaque      ImageEditParamsBackground = "opaque"
	ImageEditParamsBackgroundAuto        ImageEditParamsBackground = "auto"
)
|
|
|
|
// The quality of the image that will be generated. `high`, `medium` and `low` are
// only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
// Defaults to `auto`.
type ImageEditParamsQuality string

// Allowed values for [ImageEditParamsQuality].
const (
	ImageEditParamsQualityStandard ImageEditParamsQuality = "standard"
	ImageEditParamsQualityLow      ImageEditParamsQuality = "low"
	ImageEditParamsQualityMedium   ImageEditParamsQuality = "medium"
	ImageEditParamsQualityHigh     ImageEditParamsQuality = "high"
	ImageEditParamsQualityAuto     ImageEditParamsQuality = "auto"
)
|
|
|
|
// The format in which the generated images are returned. Must be one of `url` or
// `b64_json`. URLs are only valid for 60 minutes after the image has been
// generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
// will always return base64-encoded images.
type ImageEditParamsResponseFormat string

// Allowed values for [ImageEditParamsResponseFormat].
const (
	ImageEditParamsResponseFormatURL     ImageEditParamsResponseFormat = "url"
	ImageEditParamsResponseFormatB64JSON ImageEditParamsResponseFormat = "b64_json"
)
|
|
|
|
// The size of the generated images. Must be one of `1024x1024`, `1536x1024`
// (landscape), `1024x1536` (portrait), or `auto` (default value) for
// `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
type ImageEditParamsSize string

// Allowed values for [ImageEditParamsSize].
const (
	ImageEditParamsSize256x256   ImageEditParamsSize = "256x256"
	ImageEditParamsSize512x512   ImageEditParamsSize = "512x512"
	ImageEditParamsSize1024x1024 ImageEditParamsSize = "1024x1024"
	ImageEditParamsSize1536x1024 ImageEditParamsSize = "1536x1024"
	ImageEditParamsSize1024x1536 ImageEditParamsSize = "1024x1536"
	ImageEditParamsSizeAuto      ImageEditParamsSize = "auto"
)
|
|
|
|
// ImageGenerateParams are the request parameters for [ImageService.Generate].
type ImageGenerateParams struct {
	// A text description of the desired image(s). The maximum length is 32000
	// characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
	// for `dall-e-3`.
	Prompt string `json:"prompt,required"`
	// The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
	// `n=1` is supported.
	N param.Opt[int64] `json:"n,omitzero"`
	// The compression level (0-100%) for the generated images. This parameter is only
	// supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
	// defaults to 100.
	OutputCompression param.Opt[int64] `json:"output_compression,omitzero"`
	// A unique identifier representing your end-user, which can help OpenAI to monitor
	// and detect abuse.
	// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
	User param.Opt[string] `json:"user,omitzero"`
	// Allows to set transparency for the background of the generated image(s). This
	// parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
	// `opaque` or `auto` (default value). When `auto` is used, the model will
	// automatically determine the best background for the image.
	//
	// If `transparent`, the output format needs to support transparency, so it should
	// be set to either `png` (default value) or `webp`.
	//
	// Any of "transparent", "opaque", "auto".
	Background ImageGenerateParamsBackground `json:"background,omitzero"`
	// The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
	// `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
	// `gpt-image-1` is used.
	Model ImageModel `json:"model,omitzero"`
	// Control the content-moderation level for images generated by `gpt-image-1`. Must
	// be either `low` for less restrictive filtering or `auto` (default value).
	//
	// Any of "low", "auto".
	Moderation ImageGenerateParamsModeration `json:"moderation,omitzero"`
	// The format in which the generated images are returned. This parameter is only
	// supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
	//
	// Any of "png", "jpeg", "webp".
	OutputFormat ImageGenerateParamsOutputFormat `json:"output_format,omitzero"`
	// The quality of the image that will be generated.
	//
	// - `auto` (default value) will automatically select the best quality for the
	//   given model.
	// - `high`, `medium` and `low` are supported for `gpt-image-1`.
	// - `hd` and `standard` are supported for `dall-e-3`.
	// - `standard` is the only option for `dall-e-2`.
	//
	// Any of "standard", "hd", "low", "medium", "high", "auto".
	Quality ImageGenerateParamsQuality `json:"quality,omitzero"`
	// The format in which generated images with `dall-e-2` and `dall-e-3` are
	// returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
	// after the image has been generated. This parameter isn't supported for
	// `gpt-image-1` which will always return base64-encoded images.
	//
	// Any of "url", "b64_json".
	ResponseFormat ImageGenerateParamsResponseFormat `json:"response_format,omitzero"`
	// The size of the generated images. Must be one of `1024x1024`, `1536x1024`
	// (landscape), `1024x1536` (portrait), or `auto` (default value) for
	// `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
	// one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
	//
	// Any of "auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512",
	// "1792x1024", "1024x1792".
	Size ImageGenerateParamsSize `json:"size,omitzero"`
	// The style of the generated images. This parameter is only supported for
	// `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
	// towards generating hyper-real and dramatic images. Natural causes the model to
	// produce more natural, less hyper-real looking images.
	//
	// Any of "vivid", "natural".
	Style ImageGenerateParamsStyle `json:"style,omitzero"`
	paramObj
}
|
|
|
|
// MarshalJSON serializes the params. The local shadow type has the same fields
// but not this method, so marshaling it does not re-enter MarshalJSON.
func (r ImageGenerateParams) MarshalJSON() (data []byte, err error) {
	type shadow ImageGenerateParams
	return param.MarshalObject(r, (*shadow)(&r))
}

// UnmarshalJSON deserializes JSON data into the params struct.
func (r *ImageGenerateParams) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
|
|
|
|
// Allows to set transparency for the background of the generated image(s). This
// parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
// `opaque` or `auto` (default value). When `auto` is used, the model will
// automatically determine the best background for the image.
//
// If `transparent`, the output format needs to support transparency, so it should
// be set to either `png` (default value) or `webp`.
type ImageGenerateParamsBackground string

// Allowed values for [ImageGenerateParamsBackground].
const (
	ImageGenerateParamsBackgroundTransparent ImageGenerateParamsBackground = "transparent"
	ImageGenerateParamsBackgroundOpaque      ImageGenerateParamsBackground = "opaque"
	ImageGenerateParamsBackgroundAuto        ImageGenerateParamsBackground = "auto"
)
|
|
|
|
// Control the content-moderation level for images generated by `gpt-image-1`. Must
// be either `low` for less restrictive filtering or `auto` (default value).
type ImageGenerateParamsModeration string

// Allowed values for [ImageGenerateParamsModeration].
const (
	ImageGenerateParamsModerationLow  ImageGenerateParamsModeration = "low"
	ImageGenerateParamsModerationAuto ImageGenerateParamsModeration = "auto"
)
|
|
|
|
// The format in which the generated images are returned. This parameter is only
// supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
type ImageGenerateParamsOutputFormat string

// Allowed values for [ImageGenerateParamsOutputFormat].
const (
	ImageGenerateParamsOutputFormatPNG  ImageGenerateParamsOutputFormat = "png"
	ImageGenerateParamsOutputFormatJPEG ImageGenerateParamsOutputFormat = "jpeg"
	ImageGenerateParamsOutputFormatWebP ImageGenerateParamsOutputFormat = "webp"
)
|
|
|
|
// The quality of the image that will be generated.
//
// - `auto` (default value) will automatically select the best quality for the
//   given model.
// - `high`, `medium` and `low` are supported for `gpt-image-1`.
// - `hd` and `standard` are supported for `dall-e-3`.
// - `standard` is the only option for `dall-e-2`.
type ImageGenerateParamsQuality string

// Allowed values for [ImageGenerateParamsQuality].
const (
	ImageGenerateParamsQualityStandard ImageGenerateParamsQuality = "standard"
	ImageGenerateParamsQualityHD       ImageGenerateParamsQuality = "hd"
	ImageGenerateParamsQualityLow      ImageGenerateParamsQuality = "low"
	ImageGenerateParamsQualityMedium   ImageGenerateParamsQuality = "medium"
	ImageGenerateParamsQualityHigh     ImageGenerateParamsQuality = "high"
	ImageGenerateParamsQualityAuto     ImageGenerateParamsQuality = "auto"
)
|
|
|
|
// The format in which generated images with `dall-e-2` and `dall-e-3` are
// returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
// after the image has been generated. This parameter isn't supported for
// `gpt-image-1` which will always return base64-encoded images.
type ImageGenerateParamsResponseFormat string

// Allowed values for [ImageGenerateParamsResponseFormat].
const (
	ImageGenerateParamsResponseFormatURL     ImageGenerateParamsResponseFormat = "url"
	ImageGenerateParamsResponseFormatB64JSON ImageGenerateParamsResponseFormat = "b64_json"
)
|
|
|
|
// The size of the generated images. Must be one of `1024x1024`, `1536x1024`
// (landscape), `1024x1536` (portrait), or `auto` (default value) for
// `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
// one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
type ImageGenerateParamsSize string

// Allowed values for [ImageGenerateParamsSize].
const (
	ImageGenerateParamsSizeAuto      ImageGenerateParamsSize = "auto"
	ImageGenerateParamsSize1024x1024 ImageGenerateParamsSize = "1024x1024"
	ImageGenerateParamsSize1536x1024 ImageGenerateParamsSize = "1536x1024"
	ImageGenerateParamsSize1024x1536 ImageGenerateParamsSize = "1024x1536"
	ImageGenerateParamsSize256x256   ImageGenerateParamsSize = "256x256"
	ImageGenerateParamsSize512x512   ImageGenerateParamsSize = "512x512"
	ImageGenerateParamsSize1792x1024 ImageGenerateParamsSize = "1792x1024"
	ImageGenerateParamsSize1024x1792 ImageGenerateParamsSize = "1024x1792"
)
|
|
|
|
// The style of the generated images. This parameter is only supported for
// `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
// towards generating hyper-real and dramatic images. Natural causes the model to
// produce more natural, less hyper-real looking images.
type ImageGenerateParamsStyle string

// Allowed values for [ImageGenerateParamsStyle].
const (
	ImageGenerateParamsStyleVivid   ImageGenerateParamsStyle = "vivid"
	ImageGenerateParamsStyleNatural ImageGenerateParamsStyle = "natural"
)
|