diff --git a/README.md b/README.md index 9882cb5..640f1c5 100644 --- a/README.md +++ b/README.md @@ -22,3 +22,12 @@ You can put these in `.env` file or pass them as environment variables. - Start stable diffusion webui: `cd sd-webui`, `./webui.sh --api` - Start bot: `deno task start` + +## Codegen + +The Stable Diffusion API in `common/sdApi.ts` is auto-generated. To regenerate it, first start your +SD WebUI with `--nowebui --api`, and then run: + +```sh +deno run npm:openapi-typescript http://localhost:7861/openapi.json -o common/sdApi.ts +``` diff --git a/bot/cancelCommand.ts b/bot/cancelCommand.ts index 5c8133a..090afbc 100644 --- a/bot/cancelCommand.ts +++ b/bot/cancelCommand.ts @@ -1,9 +1,11 @@ -import { jobStore } from "../db/jobStore.ts"; +import { generationQueue } from "../tasks/generationQueue.ts"; import { Context } from "./mod.ts"; export async function cancelCommand(ctx: Context) { - const jobs = await jobStore.getBy("status.type", { value: "waiting" }); - const userJobs = jobs.filter((j) => j.value.from.id === ctx.from?.id); - for (const job of userJobs) await job.delete(); + const jobs = await generationQueue.getAllJobs(); + const userJobs = jobs + .filter((job) => job.lockUntil > new Date()) + .filter((j) => j.state.from.id === ctx.from?.id); + for (const job of userJobs) await generationQueue.deleteJob(job.id); await ctx.reply(`Cancelled ${userJobs.length} jobs`); } diff --git a/bot/img2imgCommand.ts b/bot/img2imgCommand.ts index 4632f4b..0eb014c 100644 --- a/bot/img2imgCommand.ts +++ b/bot/img2imgCommand.ts @@ -1,8 +1,9 @@ import { Collections, Grammy, GrammyStatelessQ } from "../deps.ts"; -import { formatUserChat } from "../common/utils.ts"; -import { jobStore } from "../db/jobStore.ts"; +import { formatUserChat } from "../common/formatUserChat.ts"; import { parsePngInfo, PngInfo } from "../common/parsePngInfo.ts"; import { Context, logger } from "./mod.ts"; +import { generationQueue } from "../tasks/generationQueue.ts"; +import { getConfig } from "../db/config.ts"; export const img2imgQuestion = new GrammyStatelessQ.StatelessQuestion( "img2img", @@ -27,23 +28,25 @@ async function img2img( return; } - if (ctx.session.global.pausedReason != null) { - await ctx.reply(`I'm paused: ${ctx.session.global.pausedReason || "No reason given"}`); + const config = await getConfig(); + + if (config.pausedReason != null) { + await ctx.reply(`I'm paused: ${config.pausedReason || "No reason given"}`); return; } - const jobs = await jobStore.getBy("status.type", { value: "waiting" }); - if (jobs.length >= ctx.session.global.maxJobs) { + const jobs = await generationQueue.getAllJobs(); + if (jobs.length >= config.maxJobs) { await ctx.reply( - `The queue is full. Try again later. (Max queue size: ${ctx.session.global.maxJobs})`, + `The queue is full. Try again later. (Max queue size: ${config.maxJobs})`, ); return; } - const userJobs = jobs.filter((job) => job.value.from.id === ctx.message?.from?.id); - if (userJobs.length >= ctx.session.global.maxUserJobs) { + const userJobs = jobs.filter((job) => job.state.from.id === ctx.message?.from?.id); + if (userJobs.length >= config.maxUserJobs) { await ctx.reply( - `You already have ${ctx.session.global.maxUserJobs} jobs in queue. Try again later.`, + `You already have ${config.maxUserJobs} jobs in queue. Try again later.`, ); return; } @@ -98,12 +101,12 @@ async function img2img( const replyMessage = await ctx.reply("Accepted. 
You are now in queue."); - await jobStore.create({ + await generationQueue.pushJob({ task: { type: "img2img", params, fileId }, from: ctx.message.from, chat: ctx.message.chat, - requestMessageId: ctx.message.message_id, - status: { type: "waiting", message: replyMessage }, + requestMessage: ctx.message, + replyMessage: replyMessage, }); logger().debug(`Job enqueued for ${formatUserChat(ctx.message)}`); diff --git a/bot/mod.ts b/bot/mod.ts index 08ba4c0..d8c21fa 100644 --- a/bot/mod.ts +++ b/bot/mod.ts @@ -1,47 +1,81 @@ import { Grammy, GrammyAutoQuote, GrammyFiles, GrammyParseMode, Log } from "../deps.ts"; -import { formatUserChat } from "../common/utils.ts"; -import { session, SessionFlavor } from "./session.ts"; +import { formatUserChat } from "../common/formatUserChat.ts"; import { queueCommand } from "./queueCommand.ts"; import { txt2imgCommand, txt2imgQuestion } from "./txt2imgCommand.ts"; import { pnginfoCommand, pnginfoQuestion } from "./pnginfoCommand.ts"; import { img2imgCommand, img2imgQuestion } from "./img2imgCommand.ts"; import { cancelCommand } from "./cancelCommand.ts"; +import { getConfig, setConfig } from "../db/config.ts"; export const logger = () => Log.getLogger(); +interface SessionData { + chat: ChatData; + user: UserData; +} + +interface ChatData { + language?: string; +} + +interface UserData { + params?: Record; +} + +export type Context = + & GrammyFiles.FileFlavor> + & Grammy.SessionFlavor; + type WithRetryApi = { [M in keyof T]: T[M] extends (args: infer P, ...rest: infer A) => infer R ? (args: P extends object ? P & { maxAttempts?: number } : P, ...rest: A) => R : T[M]; }; -export type Context = - & GrammyFiles.FileFlavor> - & SessionFlavor; -export const bot = new Grammy.Bot>>( - Deno.env.get("TG_BOT_TOKEN") ?? "", -); +type Api = Grammy.Api>; + +export const bot = new Grammy.Bot(Deno.env.get("TG_BOT_TOKEN")!); + bot.use(GrammyAutoQuote.autoQuote); bot.use(GrammyParseMode.hydrateReply); -bot.use(session); +bot.use(Grammy.session< + SessionData, + Grammy.Context & Grammy.SessionFlavor +>({ + type: "multi", + chat: { + initial: () => ({}), + }, + user: { + getSessionKey: (ctx) => ctx.from?.id.toFixed(), + initial: () => ({}), + }, +})); bot.api.config.use(GrammyFiles.hydrateFiles(bot.token)); // Automatically cancel requests after 30 seconds bot.api.config.use(async (prev, method, payload, signal) => { + // don't time out getUpdates requests, they are long-polling + if (method === "getUpdates") return prev(method, payload, signal); + const controller = new AbortController(); let timedOut = false; const timeout = setTimeout(() => { timedOut = true; - // TODO: this sometimes throws with "can't abort a locked stream" and crashes whole process - controller.abort(); + // TODO: this sometimes throws with "can't abort a locked stream", why? 
+ try { + controller.abort(); + } catch (error) { + logger().error(`Error while cancelling on timeout: ${error}`); + } }, 30 * 1000); signal?.addEventListener("abort", () => { controller.abort(); }); + try { - const result = await prev(method, payload, controller.signal); - return result; + return await prev(method, payload, controller.signal); } finally { clearTimeout(timeout); if (timedOut) { @@ -121,24 +155,26 @@ bot.command("queue", queueCommand); bot.command("cancel", cancelCommand); -bot.command("pause", (ctx) => { +bot.command("pause", async (ctx) => { if (!ctx.from?.username) return; - const config = ctx.session.global; + const config = await getConfig(); if (!config.adminUsernames.includes(ctx.from.username)) return; if (config.pausedReason != null) { return ctx.reply(`Already paused: ${config.pausedReason}`); } config.pausedReason = ctx.match ?? "No reason given"; + await setConfig(config); logger().warning(`Bot paused by ${ctx.from.first_name} because ${config.pausedReason}`); return ctx.reply("Paused"); }); -bot.command("resume", (ctx) => { +bot.command("resume", async (ctx) => { if (!ctx.from?.username) return; - const config = ctx.session.global; + const config = await getConfig(); if (!config.adminUsernames.includes(ctx.from.username)) return; if (config.pausedReason == null) return ctx.reply("Already running"); config.pausedReason = null; + await setConfig(config); logger().info(`Bot resumed by ${ctx.from.first_name}`); return ctx.reply("Resumed"); }); diff --git a/bot/pnginfoCommand.ts b/bot/pnginfoCommand.ts index b9600b6..d340462 100644 --- a/bot/pnginfoCommand.ts +++ b/bot/pnginfoCommand.ts @@ -1,5 +1,4 @@ import { Grammy, GrammyParseMode, GrammyStatelessQ } from "../deps.ts"; -import { fmt } from "../common/utils.ts"; import { getPngInfo, parsePngInfo } from "../common/parsePngInfo.ts"; import { Context } from "./mod.ts"; @@ -31,7 +30,7 @@ async function pnginfo(ctx: Context, includeRepliedTo: boolean): Promise { const buffer = await fetch(file.getUrl()).then((resp) => resp.arrayBuffer()); const params = parsePngInfo(getPngInfo(new Uint8Array(buffer)) ?? 
""); - const { bold } = GrammyParseMode; + const { bold, fmt } = GrammyParseMode; const paramsText = fmt([ `${params.prompt}\n`, diff --git a/bot/queueCommand.ts b/bot/queueCommand.ts index d7a733b..c33c0d7 100644 --- a/bot/queueCommand.ts +++ b/bot/queueCommand.ts @@ -1,9 +1,8 @@ import { Grammy, GrammyParseMode } from "../deps.ts"; -import { fmt } from "../common/utils.ts"; -import { runningWorkers } from "../tasks/pingWorkers.ts"; -import { jobStore } from "../db/jobStore.ts"; import { Context, logger } from "./mod.ts"; import { getFlagEmoji } from "../common/getFlagEmoji.ts"; +import { activeGenerationWorkers, generationQueue } from "../tasks/generationQueue.ts"; +import { getConfig } from "../db/config.ts"; export async function queueCommand(ctx: Grammy.CommandContext) { let formattedMessage = await getMessageText(); @@ -11,38 +10,41 @@ export async function queueCommand(ctx: Grammy.CommandContext) { handleFutureUpdates().catch((err) => logger().warning(`Updating queue message failed: ${err}`)); async function getMessageText() { - const processingJobs = await jobStore.getBy("status.type", { value: "processing" }) - .then((jobs) => jobs.map((job) => ({ ...job.value, place: 0 }))); - const waitingJobs = await jobStore.getBy("status.type", { value: "waiting" }) - .then((jobs) => jobs.map((job, index) => ({ ...job.value, place: index + 1 }))); + const config = await getConfig(); + const allJobs = await generationQueue.getAllJobs(); + const processingJobs = allJobs + .filter((job) => job.lockUntil > new Date()).map((job) => ({ ...job, index: 0 })); + const waitingJobs = allJobs + .filter((job) => job.lockUntil <= new Date()) + .map((job, index) => ({ ...job, index: index + 1 })); const jobs = [...processingJobs, ...waitingJobs]; - const { bold } = GrammyParseMode; + const { bold, fmt } = GrammyParseMode; return fmt([ "Current queue:\n", ...jobs.length > 0 ? jobs.flatMap((job) => [ - `${job.place}. `, - fmt`${bold(job.from.first_name)} `, - job.from.last_name ? fmt`${bold(job.from.last_name)} ` : "", - job.from.username ? `(@${job.from.username}) ` : "", - getFlagEmoji(job.from.language_code) ?? "", - job.chat.type === "private" ? " in private chat " : ` in ${job.chat.title} `, - job.chat.type !== "private" && job.chat.type !== "group" && - job.chat.username - ? `(@${job.chat.username}) ` + `${job.index}. `, + fmt`${bold(job.state.from.first_name)} `, + job.state.from.last_name ? fmt`${bold(job.state.from.last_name)} ` : "", + job.state.from.username ? `(@${job.state.from.username}) ` : "", + getFlagEmoji(job.state.from.language_code) ?? "", + job.state.chat.type === "private" ? " in private chat " : ` in ${job.state.chat.title} `, + job.state.chat.type !== "private" && job.state.chat.type !== "group" && + job.state.chat.username + ? `(@${job.state.chat.username}) ` : "", - job.status.type === "processing" - ? `(${(job.status.progress * 100).toFixed(0)}% using ${job.status.worker}) ` + job.index === 0 && job.state.progress && job.state.sdInstanceId + ? `(${(job.state.progress * 100).toFixed(0)}% using ${job.state.sdInstanceId}) ` : "", "\n", ]) : ["Queue is empty.\n"], "\nActive workers:\n", - ...ctx.session.global.workers.flatMap((worker) => [ - runningWorkers.has(worker.id) ? "✅ " : "☠️ ", - fmt`${bold(worker.name || worker.id)} `, - `(max ${(worker.maxResolution / 1000000).toFixed(1)} Mpx) `, + ...config.sdInstances.flatMap((sdInstance) => [ + activeGenerationWorkers.has(sdInstance.id) ? 
"✅ " : "☠️ ", + fmt`${bold(sdInstance.name || sdInstance.id)} `, + `(max ${(sdInstance.maxResolution / 1000000).toFixed(1)} Mpx) `, "\n", ]), ]); diff --git a/bot/session.ts b/bot/session.ts deleted file mode 100644 index d18c97d..0000000 --- a/bot/session.ts +++ /dev/null @@ -1,81 +0,0 @@ -import { db } from "../db/db.ts"; -import { Grammy, GrammyKvStorage } from "../deps.ts"; -import { SdApi, SdTxt2ImgRequest } from "../common/sdApi.ts"; - -export type SessionFlavor = Grammy.SessionFlavor; - -export interface SessionData { - global: GlobalData; - chat: ChatData; - user: UserData; -} - -export interface GlobalData { - adminUsernames: string[]; - pausedReason: string | null; - maxUserJobs: number; - maxJobs: number; - defaultParams?: Partial; - workers: WorkerData[]; -} - -export interface WorkerData { - id: string; - name?: string; - api: SdApi; - maxResolution: number; -} - -export interface ChatData { - language?: string; -} - -export interface UserData { - params?: Partial; -} - -const globalDbAdapter = new GrammyKvStorage.DenoKVAdapter(db); - -const getDefaultGlobalData = (): GlobalData => ({ - adminUsernames: Deno.env.get("TG_ADMIN_USERS")?.split(",") ?? [], - pausedReason: null, - maxUserJobs: 3, - maxJobs: 20, - defaultParams: { - batch_size: 1, - n_iter: 1, - width: 512, - height: 768, - steps: 30, - cfg_scale: 10, - negative_prompt: "boring_e621_fluffyrock_v4 boring_e621_v4", - }, - workers: [ - { - id: "local", - api: { url: Deno.env.get("SD_API_URL") ?? "http://127.0.0.1:7860/" }, - maxResolution: 1024 * 1024, - }, - ], -}); - -export const session = Grammy.session({ - type: "multi", - global: { - getSessionKey: () => "global", - initial: getDefaultGlobalData, - storage: globalDbAdapter, - }, - chat: { - initial: () => ({}), - }, - user: { - getSessionKey: (ctx) => ctx.from?.id.toFixed(), - initial: () => ({}), - }, -}); - -export async function getGlobalSession(): Promise { - const data = await globalDbAdapter.read("global"); - return data ?? getDefaultGlobalData(); -} diff --git a/bot/txt2imgCommand.ts b/bot/txt2imgCommand.ts index c0fd9b1..ce6cdf3 100644 --- a/bot/txt2imgCommand.ts +++ b/bot/txt2imgCommand.ts @@ -1,8 +1,9 @@ import { Grammy, GrammyStatelessQ } from "../deps.ts"; -import { formatUserChat } from "../common/utils.ts"; -import { jobStore } from "../db/jobStore.ts"; +import { formatUserChat } from "../common/formatUserChat.ts"; import { getPngInfo, parsePngInfo, PngInfo } from "../common/parsePngInfo.ts"; import { Context, logger } from "./mod.ts"; +import { generationQueue } from "../tasks/generationQueue.ts"; +import { getConfig } from "../db/config.ts"; export const txt2imgQuestion = new GrammyStatelessQ.StatelessQuestion( "txt2img", @@ -22,23 +23,25 @@ async function txt2img(ctx: Context, match: string, includeRepliedTo: boolean): return; } - if (ctx.session.global.pausedReason != null) { - await ctx.reply(`I'm paused: ${ctx.session.global.pausedReason || "No reason given"}`); + const config = await getConfig(); + + if (config.pausedReason != null) { + await ctx.reply(`I'm paused: ${config.pausedReason || "No reason given"}`); return; } - const jobs = await jobStore.getBy("status.type", { value: "waiting" }); - if (jobs.length >= ctx.session.global.maxJobs) { + const jobs = await generationQueue.getAllJobs(); + if (jobs.length >= config.maxJobs) { await ctx.reply( - `The queue is full. Try again later. (Max queue size: ${ctx.session.global.maxJobs})`, + `The queue is full. Try again later. 
(Max queue size: ${config.maxJobs})`, ); return; } - const userJobs = jobs.filter((job) => job.value.from.id === ctx.message?.from?.id); - if (userJobs.length >= ctx.session.global.maxUserJobs) { + const userJobs = jobs.filter((job) => job.state.from.id === ctx.message?.from?.id); + if (userJobs.length >= config.maxUserJobs) { await ctx.reply( - `You already have ${ctx.session.global.maxUserJobs} jobs in queue. Try again later.`, + `You already have ${config.maxUserJobs} jobs in queue. Try again later.`, ); return; } @@ -72,12 +75,12 @@ async function txt2img(ctx: Context, match: string, includeRepliedTo: boolean): const replyMessage = await ctx.reply("Accepted. You are now in queue."); - await jobStore.create({ + await generationQueue.pushJob({ task: { type: "txt2img", params }, from: ctx.message.from, chat: ctx.message.chat, - requestMessageId: ctx.message.message_id, - status: { type: "waiting", message: replyMessage }, + requestMessage: ctx.message, + replyMessage: replyMessage, }); logger().debug(`Job enqueued for ${formatUserChat(ctx.message)}`); diff --git a/common/SdError.ts b/common/SdError.ts new file mode 100644 index 0000000..7dc2d08 --- /dev/null +++ b/common/SdError.ts @@ -0,0 +1,32 @@ +export interface SdErrorData { + /** + * The HTTP status message or array of invalid fields. + * Can also be empty string. + */ + detail?: string | Array<{ loc: (string | number)[]; msg: string; type: string }>; + /** Can be e.g. "OutOfMemoryError" or undefined. */ + error?: string; + /** Empty string. */ + body?: string; + /** Long description of error. */ + errors?: string; +} + +export class SdError extends Error { + constructor( + prefix: string, + public readonly response: Response, + public readonly body?: SdErrorData, + ) { + let message = `${prefix}: ${response.status} ${response.statusText}`; + if (body?.error) { + message += `: ${body.error}`; + if (body.errors) message += ` - ${body.errors}`; + } else if (typeof body?.detail === "string" && body.detail.length > 0) { + message += `: ${body.detail}`; + } else if (body?.detail) { + message += `: ${JSON.stringify(body.detail)}`; + } + super(message); + } +} diff --git a/common/deadline.ts b/common/deadline.ts new file mode 100644 index 0000000..c897f16 --- /dev/null +++ b/common/deadline.ts @@ -0,0 +1,5 @@ +export function deadline(timeout: number): AbortSignal { + const controller = new AbortController(); + setTimeout(() => controller.abort(), timeout); + return controller.signal; +} diff --git a/common/formatOrdinal.ts b/common/formatOrdinal.ts new file mode 100644 index 0000000..4ad805d --- /dev/null +++ b/common/formatOrdinal.ts @@ -0,0 +1,7 @@ +export function formatOrdinal(n: number) { + if (n % 100 === 11 || n % 100 === 12 || n % 100 === 13) return `${n}th`; + if (n % 10 === 1) return `${n}st`; + if (n % 10 === 2) return `${n}nd`; + if (n % 10 === 3) return `${n}rd`; + return `${n}th`; +} diff --git a/common/formatUserChat.ts b/common/formatUserChat.ts new file mode 100644 index 0000000..c38f09a --- /dev/null +++ b/common/formatUserChat.ts @@ -0,0 +1,28 @@ +import { GrammyTypes } from "../deps.ts"; + +export function formatUserChat(ctx: { from?: GrammyTypes.User; chat?: GrammyTypes.Chat }) { + const msg: string[] = []; + if (ctx.from) { + msg.push(ctx.from.first_name); + if (ctx.from.last_name) msg.push(ctx.from.last_name); + if (ctx.from.username) msg.push(`(@${ctx.from.username})`); + if (ctx.from.language_code) msg.push(`(${ctx.from.language_code.toUpperCase()})`); + } + if (ctx.chat) { + if ( + ctx.chat.type === "group" || + 
ctx.chat.type === "supergroup" || + ctx.chat.type === "channel" + ) { + msg.push("in"); + msg.push(ctx.chat.title); + if ( + (ctx.chat.type === "supergroup" || ctx.chat.type === "channel") && + ctx.chat.username + ) { + msg.push(`(@${ctx.chat.username})`); + } + } + } + return msg.join(" "); +} diff --git a/common/getFlagEmoji.ts b/common/getFlagEmoji.ts index 6d7a18a..47805dc 100644 --- a/common/getFlagEmoji.ts +++ b/common/getFlagEmoji.ts @@ -1,47 +1,47 @@ /** Language to biggest country emoji map */ const languageToFlagMap: Record = { - "en": "🇺🇸", - "zh": "🇨🇳", - "es": "🇪🇸", - "hi": "🇮🇳", - "ar": "🇪🇬", - "pt": "🇧🇷", - "bn": "🇧🇩", - "ru": "🇷🇺", - "ja": "🇯🇵", - "pa": "🇮🇳", - "de": "🇩🇪", - "ko": "🇰🇷", - "fr": "🇫🇷", - "tr": "🇹🇷", - "ur": "🇵🇰", - "it": "🇮🇹", - "th": "🇹🇭", - "vi": "🇻🇳", - "pl": "🇵🇱", - "uk": "🇺🇦", - "uz": "🇺🇿", - "su": "🇮🇩", - "sw": "🇹🇿", - "nl": "🇳🇱", - "fi": "🇫🇮", - "el": "🇬🇷", - "da": "🇩🇰", - "cs": "🇨🇿", - "sk": "🇸🇰", - "bg": "🇧🇬", - "sv": "🇸🇪", - "be": "🇧🇾", - "hu": "🇭🇺", - "lt": "🇱🇹", - "lv": "🇱🇻", - "et": "🇪🇪", - "sl": "🇸🇮", - "hr": "🇭🇷", - "zu": "🇿🇦", - "id": "🇮🇩", - "is": "🇮🇸", - "lb": "🇱🇺", // Luxembourgish - Luxembourg + "en": "🇺🇸", // english - united states + "zh": "🇨🇳", // chinese - china + "es": "🇪🇸", // spanish - spain + "hi": "🇮🇳", // hindi - india + "ar": "🇪🇬", // arabic - egypt + "pt": "🇧🇷", // portuguese - brazil + "bn": "🇧🇩", // bengali - bangladesh + "ru": "🇷🇺", // russian - russia + "ja": "🇯🇵", // japanese - japan + "pa": "🇮🇳", // punjabi - india + "de": "🇩🇪", // german - germany + "ko": "🇰🇷", // korean - south korea + "fr": "🇫🇷", // french - france + "tr": "🇹🇷", // turkish - turkey + "ur": "🇵🇰", // urdu - pakistan + "it": "🇮🇹", // italian - italy + "th": "🇹🇭", // thai - thailand + "vi": "🇻🇳", // vietnamese - vietnam + "pl": "🇵🇱", // polish - poland + "uk": "🇺🇦", // ukrainian - ukraine + "uz": "🇺🇿", // uzbek - uzbekistan + "su": "🇮🇩", // sundanese - indonesia + "sw": "🇹🇿", // swahili - tanzania + "nl": "🇳🇱", // dutch - netherlands + "fi": "🇫🇮", // finnish - finland + "el": "🇬🇷", // greek - greece + "da": "🇩🇰", // danish - denmark + "cs": "🇨🇿", // czech - czech republic + "sk": "🇸🇰", // slovak - slovakia + "bg": "🇧🇬", // bulgarian - bulgaria + "sv": "🇸🇪", // swedish - sweden + "be": "🇧🇾", // belarusian - belarus + "hu": "🇭🇺", // hungarian - hungary + "lt": "🇱🇹", // lithuanian - lithuania + "lv": "🇱🇻", // latvian - latvia + "et": "🇪🇪", // estonian - estonia + "sl": "🇸🇮", // slovenian - slovenia + "hr": "🇭🇷", // croatian - croatia + "zu": "🇿🇦", // zulu - south africa + "id": "🇮🇩", // indonesian - indonesia + "is": "🇮🇸", // icelandic - iceland + "lb": "🇱🇺", // luxembourgish - luxembourg }; export function getFlagEmoji(languageCode?: string): string | undefined { diff --git a/common/sdApi.ts b/common/sdApi.ts index 175168b..1a82c2b 100644 --- a/common/sdApi.ts +++ b/common/sdApi.ts @@ -1,300 +1,3692 @@ -import { Async, AsyncX } from "../deps.ts"; +/** + * This file was auto-generated by openapi-typescript. + * Do not make direct changes to the file. 
+ */ -export interface SdApi { - url: string; - auth?: string; +export interface paths { + "/sdapi/v1/txt2img": { + /** Text2Imgapi */ + post: operations["text2imgapi_sdapi_v1_txt2img_post"]; + }; + "/sdapi/v1/img2img": { + /** Img2Imgapi */ + post: operations["img2imgapi_sdapi_v1_img2img_post"]; + }; + "/sdapi/v1/extra-single-image": { + /** Extras Single Image Api */ + post: operations["extras_single_image_api_sdapi_v1_extra_single_image_post"]; + }; + "/sdapi/v1/extra-batch-images": { + /** Extras Batch Images Api */ + post: operations["extras_batch_images_api_sdapi_v1_extra_batch_images_post"]; + }; + "/sdapi/v1/png-info": { + /** Pnginfoapi */ + post: operations["pnginfoapi_sdapi_v1_png_info_post"]; + }; + "/sdapi/v1/progress": { + /** Progressapi */ + get: operations["progressapi_sdapi_v1_progress_get"]; + }; + "/sdapi/v1/interrogate": { + /** Interrogateapi */ + post: operations["interrogateapi_sdapi_v1_interrogate_post"]; + }; + "/sdapi/v1/interrupt": { + /** Interruptapi */ + post: operations["interruptapi_sdapi_v1_interrupt_post"]; + }; + "/sdapi/v1/skip": { + /** Skip */ + post: operations["skip_sdapi_v1_skip_post"]; + }; + "/sdapi/v1/options": { + /** Get Config */ + get: operations["get_config_sdapi_v1_options_get"]; + /** Set Config */ + post: operations["set_config_sdapi_v1_options_post"]; + }; + "/sdapi/v1/cmd-flags": { + /** Get Cmd Flags */ + get: operations["get_cmd_flags_sdapi_v1_cmd_flags_get"]; + }; + "/sdapi/v1/samplers": { + /** Get Samplers */ + get: operations["get_samplers_sdapi_v1_samplers_get"]; + }; + "/sdapi/v1/upscalers": { + /** Get Upscalers */ + get: operations["get_upscalers_sdapi_v1_upscalers_get"]; + }; + "/sdapi/v1/latent-upscale-modes": { + /** Get Latent Upscale Modes */ + get: operations["get_latent_upscale_modes_sdapi_v1_latent_upscale_modes_get"]; + }; + "/sdapi/v1/sd-models": { + /** Get Sd Models */ + get: operations["get_sd_models_sdapi_v1_sd_models_get"]; + }; + "/sdapi/v1/sd-vae": { + /** Get Sd Vaes */ + get: operations["get_sd_vaes_sdapi_v1_sd_vae_get"]; + }; + "/sdapi/v1/hypernetworks": { + /** Get Hypernetworks */ + get: operations["get_hypernetworks_sdapi_v1_hypernetworks_get"]; + }; + "/sdapi/v1/face-restorers": { + /** Get Face Restorers */ + get: operations["get_face_restorers_sdapi_v1_face_restorers_get"]; + }; + "/sdapi/v1/realesrgan-models": { + /** Get Realesrgan Models */ + get: operations["get_realesrgan_models_sdapi_v1_realesrgan_models_get"]; + }; + "/sdapi/v1/prompt-styles": { + /** Get Prompt Styles */ + get: operations["get_prompt_styles_sdapi_v1_prompt_styles_get"]; + }; + "/sdapi/v1/embeddings": { + /** Get Embeddings */ + get: operations["get_embeddings_sdapi_v1_embeddings_get"]; + }; + "/sdapi/v1/refresh-checkpoints": { + /** Refresh Checkpoints */ + post: operations["refresh_checkpoints_sdapi_v1_refresh_checkpoints_post"]; + }; + "/sdapi/v1/create/embedding": { + /** Create Embedding */ + post: operations["create_embedding_sdapi_v1_create_embedding_post"]; + }; + "/sdapi/v1/create/hypernetwork": { + /** Create Hypernetwork */ + post: operations["create_hypernetwork_sdapi_v1_create_hypernetwork_post"]; + }; + "/sdapi/v1/preprocess": { + /** Preprocess */ + post: operations["preprocess_sdapi_v1_preprocess_post"]; + }; + "/sdapi/v1/train/embedding": { + /** Train Embedding */ + post: operations["train_embedding_sdapi_v1_train_embedding_post"]; + }; + "/sdapi/v1/train/hypernetwork": { + /** Train Hypernetwork */ + post: operations["train_hypernetwork_sdapi_v1_train_hypernetwork_post"]; + }; + "/sdapi/v1/memory": { + /** 
Get Memory */ + get: operations["get_memory_sdapi_v1_memory_get"]; + }; + "/sdapi/v1/unload-checkpoint": { + /** Unloadapi */ + post: operations["unloadapi_sdapi_v1_unload_checkpoint_post"]; + }; + "/sdapi/v1/reload-checkpoint": { + /** Reloadapi */ + post: operations["reloadapi_sdapi_v1_reload_checkpoint_post"]; + }; + "/sdapi/v1/scripts": { + /** Get Scripts List */ + get: operations["get_scripts_list_sdapi_v1_scripts_get"]; + }; + "/sdapi/v1/script-info": { + /** Get Script Info */ + get: operations["get_script_info_sdapi_v1_script_info_get"]; + }; + "/tacapi/v1/lora-info/{lora_name}": { + /** Get Lora Info */ + get: operations["get_lora_info_tacapi_v1_lora_info__lora_name__get"]; + }; + "/tacapi/v1/lyco-info/{lyco_name}": { + /** Get Lyco Info */ + get: operations["get_lyco_info_tacapi_v1_lyco_info__lyco_name__get"]; + }; + "/tacapi/v1/thumb-preview/{filename}": { + /** Get Thumb Preview */ + get: operations["get_thumb_preview_tacapi_v1_thumb_preview__filename__get"]; + }; + "/tacapi/v1/thumb-preview-blob/{filename}": { + /** Get Thumb Preview Blob */ + get: operations["get_thumb_preview_blob_tacapi_v1_thumb_preview_blob__filename__get"]; + }; + "/sdapi/v1/loras": { + /** Get Loras */ + get: operations["get_loras_sdapi_v1_loras_get"]; + }; + "/sdapi/v1/refresh-loras": { + /** Refresh Loras */ + post: operations["refresh_loras_sdapi_v1_refresh_loras_post"]; + }; } -async function fetchSdApi( - api: SdApi, - endpoint: string, - { body, timeoutMs }: { body?: unknown; timeoutMs?: number } = {}, -): Promise { - const controller = new AbortController(); - const timeoutId = timeoutMs ? setTimeout(() => controller.abort(), timeoutMs) : undefined; - let options: RequestInit | undefined; - if (body != null) { - options = { - method: "POST", - headers: { - "Content-Type": "application/json", - ...api.auth ? { Authorization: api.auth } : {}, - }, - body: JSON.stringify(body), - signal: controller.signal, +export type webhooks = Record; + +export interface components { + schemas: { + /** CreateResponse */ + CreateResponse: { + /** + * Create info + * @description Response string from create embedding or hypernetwork task. + */ + info: string; }; - } else if (api.auth) { - options = { - headers: { Authorization: api.auth }, - signal: controller.signal, + /** EmbeddingItem */ + EmbeddingItem: { + /** + * Step + * @description The number of steps that were used to train this embedding, if available + */ + step?: number; + /** + * SD Checkpoint + * @description The hash of the checkpoint this embedding was trained on, if available + */ + sd_checkpoint?: string; + /** + * SD Checkpoint Name + * @description The name of the checkpoint this embedding was trained on, if available. 
Note that this is the name that was used by the trainer; for a stable identifier, use `sd_checkpoint` instead + */ + sd_checkpoint_name?: string; + /** + * Shape + * @description The length of each individual vector in the embedding + */ + shape: number; + /** + * Vectors + * @description The number of vectors in the embedding + */ + vectors: number; }; - } - const response = await fetch(new URL(endpoint, api.url), options).catch(() => { - if (controller.signal.aborted) { - throw new SdApiError(endpoint, options, -1, "Timed out"); - } - throw new SdApiError(endpoint, options, 0, "Network error"); - }); - const result = await response.json().catch(() => { - throw new SdApiError(endpoint, options, response.status, response.statusText, { - detail: "Invalid JSON", - }); - }); - clearTimeout(timeoutId); - if (!response.ok) { - throw new SdApiError(endpoint, options, response.status, response.statusText, result); - } - return result; + /** EmbeddingsResponse */ + EmbeddingsResponse: { + /** + * Loaded + * @description Embeddings loaded for the current model + */ + loaded: { + [key: string]: components["schemas"]["EmbeddingItem"]; + }; + /** + * Skipped + * @description Embeddings skipped for the current model (likely due to architecture incompatibility) + */ + skipped: { + [key: string]: components["schemas"]["EmbeddingItem"]; + }; + }; + /** ExtrasBatchImagesRequest */ + ExtrasBatchImagesRequest: { + /** + * Resize Mode + * @description Sets the resize mode: 0 to upscale by upscaling_resize amount, 1 to upscale up to upscaling_resize_h x upscaling_resize_w. + * @default 0 + * @enum {integer} + */ + resize_mode?: 0 | 1; + /** + * Show results + * @description Should the backend return the generated image? + * @default true + */ + show_extras_results?: boolean; + /** + * GFPGAN Visibility + * @description Sets the visibility of GFPGAN, values should be between 0 and 1. + * @default 0 + */ + gfpgan_visibility?: number; + /** + * CodeFormer Visibility + * @description Sets the visibility of CodeFormer, values should be between 0 and 1. + * @default 0 + */ + codeformer_visibility?: number; + /** + * CodeFormer Weight + * @description Sets the weight of CodeFormer, values should be between 0 and 1. + * @default 0 + */ + codeformer_weight?: number; + /** + * Upscaling Factor + * @description By how much to upscale the image, only used when resize_mode=0. + * @default 2 + */ + upscaling_resize?: number; + /** + * Target Width + * @description Target width for the upscaler to hit. Only used when resize_mode=1. + * @default 512 + */ + upscaling_resize_w?: number; + /** + * Target Height + * @description Target height for the upscaler to hit. Only used when resize_mode=1. + * @default 512 + */ + upscaling_resize_h?: number; + /** + * Crop to fit + * @description Should the upscaler crop the image to fit in the chosen size? 
+ * @default true + */ + upscaling_crop?: boolean; + /** + * Main upscaler + * @description The name of the main upscaler to use, it has to be one of this list: None , Lanczos , Nearest , ESRGAN_4x , LDSR , R-ESRGAN 4x+ , R-ESRGAN 4x+ Anime6B , ScuNET GAN , ScuNET PSNR , SwinIR 4x + * @default None + */ + upscaler_1?: string; + /** + * Secondary upscaler + * @description The name of the secondary upscaler to use, it has to be one of this list: None , Lanczos , Nearest , ESRGAN_4x , LDSR , R-ESRGAN 4x+ , R-ESRGAN 4x+ Anime6B , ScuNET GAN , ScuNET PSNR , SwinIR 4x + * @default None + */ + upscaler_2?: string; + /** + * Secondary upscaler visibility + * @description Sets the visibility of secondary upscaler, values should be between 0 and 1. + * @default 0 + */ + extras_upscaler_2_visibility?: number; + /** + * Upscale first + * @description Should the upscaler run before restoring faces? + * @default false + */ + upscale_first?: boolean; + /** + * Images + * @description List of images to work on. Must be Base64 strings + */ + imageList: components["schemas"]["FileData"][]; + }; + /** ExtrasBatchImagesResponse */ + ExtrasBatchImagesResponse: { + /** + * HTML info + * @description A series of HTML tags containing the process info. + */ + html_info: string; + /** + * Images + * @description The generated images in base64 format. + */ + images: string[]; + }; + /** ExtrasSingleImageRequest */ + ExtrasSingleImageRequest: { + /** + * Resize Mode + * @description Sets the resize mode: 0 to upscale by upscaling_resize amount, 1 to upscale up to upscaling_resize_h x upscaling_resize_w. + * @default 0 + * @enum {integer} + */ + resize_mode?: 0 | 1; + /** + * Show results + * @description Should the backend return the generated image? + * @default true + */ + show_extras_results?: boolean; + /** + * GFPGAN Visibility + * @description Sets the visibility of GFPGAN, values should be between 0 and 1. + * @default 0 + */ + gfpgan_visibility?: number; + /** + * CodeFormer Visibility + * @description Sets the visibility of CodeFormer, values should be between 0 and 1. + * @default 0 + */ + codeformer_visibility?: number; + /** + * CodeFormer Weight + * @description Sets the weight of CodeFormer, values should be between 0 and 1. + * @default 0 + */ + codeformer_weight?: number; + /** + * Upscaling Factor + * @description By how much to upscale the image, only used when resize_mode=0. + * @default 2 + */ + upscaling_resize?: number; + /** + * Target Width + * @description Target width for the upscaler to hit. Only used when resize_mode=1. + * @default 512 + */ + upscaling_resize_w?: number; + /** + * Target Height + * @description Target height for the upscaler to hit. Only used when resize_mode=1. + * @default 512 + */ + upscaling_resize_h?: number; + /** + * Crop to fit + * @description Should the upscaler crop the image to fit in the chosen size? 
+ * @default true + */ + upscaling_crop?: boolean; + /** + * Main upscaler + * @description The name of the main upscaler to use, it has to be one of this list: None , Lanczos , Nearest , ESRGAN_4x , LDSR , R-ESRGAN 4x+ , R-ESRGAN 4x+ Anime6B , ScuNET GAN , ScuNET PSNR , SwinIR 4x + * @default None + */ + upscaler_1?: string; + /** + * Secondary upscaler + * @description The name of the secondary upscaler to use, it has to be one of this list: None , Lanczos , Nearest , ESRGAN_4x , LDSR , R-ESRGAN 4x+ , R-ESRGAN 4x+ Anime6B , ScuNET GAN , ScuNET PSNR , SwinIR 4x + * @default None + */ + upscaler_2?: string; + /** + * Secondary upscaler visibility + * @description Sets the visibility of secondary upscaler, values should be between 0 and 1. + * @default 0 + */ + extras_upscaler_2_visibility?: number; + /** + * Upscale first + * @description Should the upscaler run before restoring faces? + * @default false + */ + upscale_first?: boolean; + /** + * Image + * @description Image to work on, must be a Base64 string containing the image's data. + * @default + */ + image?: string; + }; + /** ExtrasSingleImageResponse */ + ExtrasSingleImageResponse: { + /** + * HTML info + * @description A series of HTML tags containing the process info. + */ + html_info: string; + /** + * Image + * @description The generated image in base64 format. + */ + image?: string; + }; + /** FaceRestorerItem */ + FaceRestorerItem: { + /** Name */ + name: string; + /** Path */ + cmd_dir?: string; + }; + /** FileData */ + FileData: { + /** + * File data + * @description Base64 representation of the file + */ + data: string; + /** File name */ + name: string; + }; + /** Flags */ + Flags: { + /** + * F + * @description ==SUPPRESS== + * @default false + */ + f?: boolean; + /** + * Update All Extensions + * @description launch.py argument: download updates for all extensions when starting the program + * @default false + */ + update_all_extensions?: boolean; + /** + * Skip Python Version Check + * @description launch.py argument: do not check python version + * @default false + */ + skip_python_version_check?: boolean; + /** + * Skip Torch Cuda Test + * @description launch.py argument: do not check if CUDA is able to work properly + * @default false + */ + skip_torch_cuda_test?: boolean; + /** + * Reinstall Xformers + * @description launch.py argument: install the appropriate version of xformers even if you have some version already installed + * @default false + */ + reinstall_xformers?: boolean; + /** + * Reinstall Torch + * @description launch.py argument: install the appropriate version of torch even if you have some version already installed + * @default false + */ + reinstall_torch?: boolean; + /** + * Update Check + * @description launch.py argument: check for updates at startup + * @default false + */ + update_check?: boolean; + /** + * Test Server + * @description launch.py argument: configure server for testing + * @default false + */ + test_server?: boolean; + /** + * Skip Prepare Environment + * @description launch.py argument: skip all environment preparation + * @default false + */ + skip_prepare_environment?: boolean; + /** + * Skip Install + * @description launch.py argument: skip installation of packages + * @default false + */ + skip_install?: boolean; + /** + * Do Not Download Clip + * @description do not download CLIP model even if it's not included in the checkpoint + * @default false + */ + do_not_download_clip?: boolean; + /** + * Data Dir + * @description base path where all user data is stored + * 
@default /home/lisq/.sd + */ + data_dir?: string; + /** + * Config + * @description path to config which constructs model + * @default /home/lisq/.sd/configs/v1-inference.yaml + */ + config?: string; + /** + * Ckpt + * @description path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded + * @default /home/lisq/.sd/model.ckpt + */ + ckpt?: string; + /** + * Ckpt Dir + * @description Path to directory with stable diffusion checkpoints + */ + ckpt_dir?: string; + /** + * Vae Dir + * @description Path to directory with VAE files + */ + vae_dir?: string; + /** + * Gfpgan Dir + * @description GFPGAN directory + * @default ./GFPGAN + */ + gfpgan_dir?: string; + /** + * Gfpgan Model + * @description GFPGAN model file name + */ + gfpgan_model?: string; + /** + * No Half + * @description do not switch the model to 16-bit floats + * @default false + */ + no_half?: boolean; + /** + * No Half Vae + * @description do not switch the VAE model to 16-bit floats + * @default false + */ + no_half_vae?: boolean; + /** + * No Progressbar Hiding + * @description do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser) + * @default false + */ + no_progressbar_hiding?: boolean; + /** + * Max Batch Count + * @description maximum batch count value for the UI + * @default 16 + */ + max_batch_count?: number; + /** + * Embeddings Dir + * @description embeddings directory for textual inversion (default: embeddings) + * @default /home/lisq/.sd/embeddings + */ + embeddings_dir?: string; + /** + * Textual Inversion Templates Dir + * @description directory with textual inversion templates + * @default /home/lisq/.sd/textual_inversion_templates + */ + textual_inversion_templates_dir?: string; + /** + * Hypernetwork Dir + * @description hypernetwork directory + * @default /home/lisq/.sd/models/hypernetworks + */ + hypernetwork_dir?: string; + /** + * Localizations Dir + * @description localizations directory + * @default /home/lisq/.sd/localizations + */ + localizations_dir?: string; + /** + * Allow Code + * @description allow custom script execution from webui + * @default false + */ + allow_code?: boolean; + /** + * Medvram + * @description enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage + * @default false + */ + medvram?: boolean; + /** + * Lowvram + * @description enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage + * @default false + */ + lowvram?: boolean; + /** + * Lowram + * @description load stable diffusion checkpoint weights to VRAM instead of RAM + * @default false + */ + lowram?: boolean; + /** + * Always Batch Cond Uncond + * @description disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram + * @default false + */ + always_batch_cond_uncond?: boolean; + /** + * Unload Gfpgan + * @description does not do anything. + * @default false + */ + unload_gfpgan?: boolean; + /** + * Precision + * @description evaluate at this precision + * @default autocast + */ + precision?: string; + /** + * Upcast Sampling + * @description upcast sampling. No effect with --no-half. Usually produces similar results to --no-half with better performance while using less memory. 
+ * @default false + */ + upcast_sampling?: boolean; + /** + * Share + * @description use share=True for gradio and make the UI accessible through their site + * @default false + */ + share?: boolean; + /** + * Ngrok + * @description ngrok authtoken, alternative to gradio --share + */ + ngrok?: string; + /** + * Ngrok Region + * @description does not do anything. + * @default + */ + ngrok_region?: string; + /** + * Ngrok Options + * @description The options to pass to ngrok in JSON format, e.g.: '{"authtoken_from_env":true, "basic_auth":"user:password", "oauth_provider":"google", "oauth_allow_emails":"user@asdf.com"}' + * @default {} + */ + ngrok_options?: Record; + /** + * Enable Insecure Extension Access + * @description enable extensions tab regardless of other options + * @default false + */ + enable_insecure_extension_access?: boolean; + /** + * Codeformer Models Path + * @description Path to directory with codeformer model file(s). + * @default /home/lisq/.sd/models/Codeformer + */ + codeformer_models_path?: string; + /** + * Gfpgan Models Path + * @description Path to directory with GFPGAN model file(s). + * @default /home/lisq/.sd/models/GFPGAN + */ + gfpgan_models_path?: string; + /** + * Esrgan Models Path + * @description Path to directory with ESRGAN model file(s). + * @default /home/lisq/.sd/models/ESRGAN + */ + esrgan_models_path?: string; + /** + * Bsrgan Models Path + * @description Path to directory with BSRGAN model file(s). + * @default /home/lisq/.sd/models/BSRGAN + */ + bsrgan_models_path?: string; + /** + * Realesrgan Models Path + * @description Path to directory with RealESRGAN model file(s). + * @default /home/lisq/.sd/models/RealESRGAN + */ + realesrgan_models_path?: string; + /** + * Clip Models Path + * @description Path to directory with CLIP model file(s). 
+ */ + clip_models_path?: string; + /** + * Xformers + * @description enable xformers for cross attention layers + * @default false + */ + xformers?: boolean; + /** + * Force Enable Xformers + * @description enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work + * @default false + */ + force_enable_xformers?: boolean; + /** + * Xformers Flash Attention + * @description enable xformers with Flash Attention to improve reproducibility (supported for SD2.x or variant only) + * @default false + */ + xformers_flash_attention?: boolean; + /** + * Deepdanbooru + * @description does not do anything + * @default false + */ + deepdanbooru?: boolean; + /** + * Opt Split Attention + * @description prefer Doggettx's cross-attention layer optimization for automatic choice of optimization + * @default false + */ + opt_split_attention?: boolean; + /** + * Opt Sub Quad Attention + * @description prefer memory efficient sub-quadratic cross-attention layer optimization for automatic choice of optimization + * @default false + */ + opt_sub_quad_attention?: boolean; + /** + * Sub Quad Q Chunk Size + * @description query chunk size for the sub-quadratic cross-attention layer optimization to use + * @default 1024 + */ + sub_quad_q_chunk_size?: number; + /** + * Sub Quad Kv Chunk Size + * @description kv chunk size for the sub-quadratic cross-attention layer optimization to use + */ + sub_quad_kv_chunk_size?: string; + /** + * Sub Quad Chunk Threshold + * @description the percentage of VRAM threshold for the sub-quadratic cross-attention layer optimization to use chunking + */ + sub_quad_chunk_threshold?: string; + /** + * Opt Split Attention Invokeai + * @description prefer InvokeAI's cross-attention layer optimization for automatic choice of optimization + * @default false + */ + opt_split_attention_invokeai?: boolean; + /** + * Opt Split Attention V1 + * @description prefer older version of split attention optimization for automatic choice of optimization + * @default false + */ + opt_split_attention_v1?: boolean; + /** + * Opt Sdp Attention + * @description prefer scaled dot product cross-attention layer optimization for automatic choice of optimization; requires PyTorch 2.* + * @default false + */ + opt_sdp_attention?: boolean; + /** + * Opt Sdp No Mem Attention + * @description prefer scaled dot product cross-attention layer optimization without memory efficient attention for automatic choice of optimization, makes image generation deterministic; requires PyTorch 2.* + * @default false + */ + opt_sdp_no_mem_attention?: boolean; + /** + * Disable Opt Split Attention + * @description prefer no cross-attention layer optimization for automatic choice of optimization + * @default false + */ + disable_opt_split_attention?: boolean; + /** + * Disable Nan Check + * @description do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI + * @default false + */ + disable_nan_check?: boolean; + /** + * Use Cpu + * @description use CPU as torch device for specified modules + * @default [] + */ + use_cpu?: unknown[]; + /** + * Listen + * @description launch gradio with 0.0.0.0 as server name, allowing to respond to network requests + * @default false + */ + listen?: boolean; + /** + * Port + * @description launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available + */ + port?: string; + /** + * Show Negative Prompt + * 
@description does not do anything + * @default false + */ + show_negative_prompt?: boolean; + /** + * Ui Config File + * @description filename to use for ui configuration + * @default /home/lisq/.sd/ui-config.json + */ + ui_config_file?: string; + /** + * Hide Ui Dir Config + * @description hide directory configuration from webui + * @default false + */ + hide_ui_dir_config?: boolean; + /** + * Freeze Settings + * @description disable editing settings + * @default false + */ + freeze_settings?: boolean; + /** + * Ui Settings File + * @description filename to use for ui settings + * @default /home/lisq/.sd/config.json + */ + ui_settings_file?: string; + /** + * Gradio Debug + * @description launch gradio with --debug option + * @default false + */ + gradio_debug?: boolean; + /** + * Gradio Auth + * @description set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3" + */ + gradio_auth?: string; + /** + * Gradio Auth Path + * @description set gradio authentication file path ex. "/path/to/auth/file" same auth format as --gradio-auth + */ + gradio_auth_path?: string; + /** + * Gradio Img2Img Tool + * @description does not do anything + */ + gradio_img2img_tool?: string; + /** + * Gradio Inpaint Tool + * @description does not do anything + */ + gradio_inpaint_tool?: string; + /** + * Gradio Allowed Path + * @description add path to gradio's allowed_paths, make it possible to serve files from it + */ + gradio_allowed_path?: string; + /** + * Opt Channelslast + * @description change memory type for stable diffusion to channels last + * @default false + */ + opt_channelslast?: boolean; + /** + * Styles File + * @description filename to use for styles + * @default /home/lisq/.sd/styles.csv + */ + styles_file?: string; + /** + * Autolaunch + * @description open the webui URL in the system's default browser upon launch + * @default false + */ + autolaunch?: boolean; + /** + * Theme + * @description launches the UI with light or dark theme + */ + theme?: string; + /** + * Use Textbox Seed + * @description use textbox for seeds in UI (no up/down, but possible to input long seeds) + * @default false + */ + use_textbox_seed?: boolean; + /** + * Disable Console Progressbars + * @description do not output progressbars to console + * @default false + */ + disable_console_progressbars?: boolean; + /** + * Enable Console Prompts + * @description print prompts to console when generating with txt2img and img2img + * @default false + */ + enable_console_prompts?: boolean; + /** + * Vae Path + * @description Checkpoint to use as VAE; setting this argument disables all settings related to VAE + */ + vae_path?: string; + /** + * Disable Safe Unpickle + * @description disable checking pytorch models for malicious code + * @default false + */ + disable_safe_unpickle?: boolean; + /** + * Api + * @description use api=True to launch the API together with the webui (use --nowebui instead for only the API) + * @default false + */ + api?: boolean; + /** + * Api Auth + * @description Set authentication for API like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3" + */ + api_auth?: string; + /** + * Api Log + * @description use api-log=True to enable logging of all API requests + * @default false + */ + api_log?: boolean; + /** + * Nowebui + * @description use api=True to launch the API instead of the webui + * @default false + */ + nowebui?: boolean; + /** + * Ui Debug Mode + * @description Don't load model to quickly launch UI + * @default false + */ + 
ui_debug_mode?: boolean; + /** + * Device Id + * @description Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before) + */ + device_id?: string; + /** + * Administrator + * @description Administrator rights + * @default false + */ + administrator?: boolean; + /** + * Cors Allow Origins + * @description Allowed CORS origin(s) in the form of a comma-separated list (no spaces) + */ + cors_allow_origins?: string; + /** + * Cors Allow Origins Regex + * @description Allowed CORS origin(s) in the form of a single regular expression + */ + cors_allow_origins_regex?: string; + /** + * Tls Keyfile + * @description Partially enables TLS, requires --tls-certfile to fully function + */ + tls_keyfile?: string; + /** + * Tls Certfile + * @description Partially enables TLS, requires --tls-keyfile to fully function + */ + tls_certfile?: string; + /** + * Disable Tls Verify + * @description When passed, enables the use of self-signed certificates. + */ + disable_tls_verify?: string; + /** + * Server Name + * @description Sets hostname of server + */ + server_name?: string; + /** + * Gradio Queue + * @description does not do anything + * @default true + */ + gradio_queue?: boolean; + /** + * No Gradio Queue + * @description Disables gradio queue; causes the webpage to use http requests instead of websockets; was the defaul in earlier versions + * @default false + */ + no_gradio_queue?: boolean; + /** + * Skip Version Check + * @description Do not check versions of torch and xformers + * @default false + */ + skip_version_check?: boolean; + /** + * No Hashing + * @description disable sha256 hashing of checkpoints to help loading performance + * @default false + */ + no_hashing?: boolean; + /** + * No Download Sd Model + * @description don't download SD1.5 model even if no model is found in --ckpt-dir + * @default false + */ + no_download_sd_model?: boolean; + /** + * Subpath + * @description customize the subpath for gradio, use with reverse proxy + */ + subpath?: string; + /** + * Add Stop Route + * @description add /_stop route to stop server + * @default false + */ + add_stop_route?: boolean; + /** + * Api Server Stop + * @description enable server stop/restart/kill via api + * @default false + */ + api_server_stop?: boolean; + /** + * Timeout Keep Alive + * @description set timeout_keep_alive for uvicorn + * @default 30 + */ + timeout_keep_alive?: number; + /** + * Ldsr Models Path + * @description Path to directory with LDSR model file(s). + * @default /home/lisq/.sd/models/LDSR + */ + ldsr_models_path?: string; + /** + * Lora Dir + * @description Path to directory with Lora networks. + * @default /home/lisq/.sd/models/Lora + */ + lora_dir?: string; + /** + * Lyco Dir Backcompat + * @description Path to directory with LyCORIS networks (for backawards compatibility; can also use --lyco-dir). + * @default /home/lisq/.sd/models/LyCORIS + */ + lyco_dir_backcompat?: string; + /** + * Scunet Models Path + * @description Path to directory with ScuNET model file(s). + * @default /home/lisq/.sd/models/ScuNET + */ + scunet_models_path?: string; + /** + * Swinir Models Path + * @description Path to directory with SwinIR model file(s). 
+ * @default /home/lisq/.sd/models/SwinIR + */ + swinir_models_path?: string; + }; + /** HTTPValidationError */ + HTTPValidationError: { + /** Detail */ + detail?: components["schemas"]["ValidationError"][]; + }; + /** HypernetworkItem */ + HypernetworkItem: { + /** Name */ + name: string; + /** Path */ + path?: string; + }; + /** ImageToImageResponse */ + ImageToImageResponse: { + /** + * Image + * @description The generated image in base64 format. + */ + images?: string[]; + /** Parameters */ + parameters: Record; + /** Info */ + info: string; + }; + /** InterrogateRequest */ + InterrogateRequest: { + /** + * Image + * @description Image to work on, must be a Base64 string containing the image's data. + * @default + */ + image?: string; + /** + * Model + * @description The interrogate model used. + * @default clip + */ + model?: string; + }; + /** LatentUpscalerModeItem */ + LatentUpscalerModeItem: { + /** Name */ + name: string; + }; + /** MemoryResponse */ + MemoryResponse: { + /** + * RAM + * @description System memory stats + */ + ram: Record; + /** + * CUDA + * @description nVidia CUDA memory stats + */ + cuda: Record; + }; + /** Options */ + Options: { + /** + * Samples Save + * @description Always save all generated images + * @default true + */ + samples_save?: boolean; + /** + * Samples Format + * @description File format for images + * @default png + */ + samples_format?: string; + /** + * Samples Filename Pattern + * @description Images filename pattern + * @default + */ + samples_filename_pattern?: unknown; + /** + * Save Images Add Number + * @description Add number to filename when saving + * @default true + */ + save_images_add_number?: boolean; + /** + * Grid Save + * @description Always save all generated image grids + * @default true + */ + grid_save?: boolean; + /** + * Grid Format + * @description File format for grids + * @default png + */ + grid_format?: string; + /** + * Grid Extended Filename + * @description Add extended info (seed, prompt) to filename when saving grid + * @default false + */ + grid_extended_filename?: unknown; + /** + * Grid Only If Multiple + * @description Do not save grids consisting of one picture + * @default true + */ + grid_only_if_multiple?: boolean; + /** + * Grid Prevent Empty Spots + * @description Prevent empty spots in grid (when set to autodetect) + * @default false + */ + grid_prevent_empty_spots?: unknown; + /** + * Grid Zip Filename Pattern + * @description Archive filename pattern + * @default + */ + grid_zip_filename_pattern?: unknown; + /** + * N Rows + * @description Grid row count; use -1 for autodetect and 0 for it to be same as batch size + * @default -1 + */ + n_rows?: number; + /** + * Font + * @description Font for image grids that have text + * @default + */ + font?: unknown; + /** + * Grid Text Active Color + * @description Text color for image grids + * @default #000000 + */ + grid_text_active_color?: string; + /** + * Grid Text Inactive Color + * @description Inactive text color for image grids + * @default #999999 + */ + grid_text_inactive_color?: string; + /** + * Grid Background Color + * @description Background color for image grids + * @default #ffffff + */ + grid_background_color?: string; + /** + * Enable Pnginfo + * @description Save text information about generation parameters as chunks to png files + * @default true + */ + enable_pnginfo?: boolean; + /** + * Save Txt + * @description Create a text file next to every image with generation parameters. 
+ * @default false + */ + save_txt?: unknown; + /** + * Save Images Before Face Restoration + * @description Save a copy of image before doing face restoration. + * @default false + */ + save_images_before_face_restoration?: unknown; + /** + * Save Images Before Highres Fix + * @description Save a copy of image before applying highres fix. + * @default false + */ + save_images_before_highres_fix?: unknown; + /** + * Save Images Before Color Correction + * @description Save a copy of image before applying color correction to img2img results + * @default false + */ + save_images_before_color_correction?: unknown; + /** + * Save Mask + * @description For inpainting, save a copy of the greyscale mask + * @default false + */ + save_mask?: unknown; + /** + * Save Mask Composite + * @description For inpainting, save a masked composite + * @default false + */ + save_mask_composite?: unknown; + /** + * Jpeg Quality + * @description Quality for saved jpeg images + * @default 80 + */ + jpeg_quality?: number; + /** + * Webp Lossless + * @description Use lossless compression for webp images + * @default false + */ + webp_lossless?: unknown; + /** + * Export For 4Chan + * @description Save copy of large images as JPG + * @default true + */ + export_for_4chan?: boolean; + /** + * Img Downscale Threshold + * @description File size limit for the above option, MB + * @default 4 + */ + img_downscale_threshold?: number; + /** + * Target Side Length + * @description Width/height limit for the above option, in pixels + * @default 4000 + */ + target_side_length?: number; + /** + * Img Max Size Mp + * @description Maximum image size + * @default 200 + */ + img_max_size_mp?: number; + /** + * Use Original Name Batch + * @description Use original name for output filename during batch process in extras tab + * @default true + */ + use_original_name_batch?: boolean; + /** + * Use Upscaler Name As Suffix + * @description Use upscaler name as filename suffix in the extras tab + * @default false + */ + use_upscaler_name_as_suffix?: unknown; + /** + * Save Selected Only + * @description When using 'Save' button, only save a single selected image + * @default true + */ + save_selected_only?: boolean; + /** + * Save Init Img + * @description Save init images when using img2img + * @default false + */ + save_init_img?: unknown; + /** + * Temp Dir + * @description Directory for temporary images; leave empty for default + * @default + */ + temp_dir?: unknown; + /** + * Clean Temp Dir At Start + * @description Cleanup non-default temporary directory when starting webui + * @default false + */ + clean_temp_dir_at_start?: unknown; + /** + * Outdir Samples + * @description Output directory for images; if empty, defaults to three directories below + * @default + */ + outdir_samples?: unknown; + /** + * Outdir Txt2Img Samples + * @description Output directory for txt2img images + * @default outputs/txt2img-images + */ + outdir_txt2img_samples?: string; + /** + * Outdir Img2Img Samples + * @description Output directory for img2img images + * @default outputs/img2img-images + */ + outdir_img2img_samples?: string; + /** + * Outdir Extras Samples + * @description Output directory for images from extras tab + * @default outputs/extras-images + */ + outdir_extras_samples?: string; + /** + * Outdir Grids + * @description Output directory for grids; if empty, defaults to two directories below + * @default + */ + outdir_grids?: unknown; + /** + * Outdir Txt2Img Grids + * @description Output directory for txt2img grids + * @default 
outputs/txt2img-grids + */ + outdir_txt2img_grids?: string; + /** + * Outdir Img2Img Grids + * @description Output directory for img2img grids + * @default outputs/img2img-grids + */ + outdir_img2img_grids?: string; + /** + * Outdir Save + * @description Directory for saving images using the Save button + * @default log/images + */ + outdir_save?: string; + /** + * Outdir Init Images + * @description Directory for saving init images when using img2img + * @default outputs/init-images + */ + outdir_init_images?: string; + /** + * Save To Dirs + * @description Save images to a subdirectory + * @default true + */ + save_to_dirs?: boolean; + /** + * Grid Save To Dirs + * @description Save grids to a subdirectory + * @default true + */ + grid_save_to_dirs?: boolean; + /** + * Use Save To Dirs For Ui + * @description When using "Save" button, save images to a subdirectory + * @default false + */ + use_save_to_dirs_for_ui?: unknown; + /** + * Directories Filename Pattern + * @description Directory name pattern + * @default [date] + */ + directories_filename_pattern?: string; + /** + * Directories Max Prompt Words + * @description Max prompt words for [prompt_words] pattern + * @default 8 + */ + directories_max_prompt_words?: number; + /** + * Esrgan Tile + * @description Tile size for ESRGAN upscalers. + * @default 192 + */ + ESRGAN_tile?: number; + /** + * Esrgan Tile Overlap + * @description Tile overlap for ESRGAN upscalers. + * @default 8 + */ + ESRGAN_tile_overlap?: number; + /** + * Realesrgan Enabled Models + * @description Select which Real-ESRGAN models to show in the web UI. + * @default [ + * "R-ESRGAN 4x+", + * "R-ESRGAN 4x+ Anime6B" + * ] + */ + realesrgan_enabled_models?: unknown[]; + /** + * Upscaler For Img2Img + * @description Upscaler for img2img + */ + upscaler_for_img2img?: unknown; + /** + * Face Restoration Model + * @description Face restoration model + * @default CodeFormer + */ + face_restoration_model?: string; + /** + * Code Former Weight + * @description CodeFormer weight + * @default 0.5 + */ + code_former_weight?: number; + /** + * Face Restoration Unload + * @description Move face restoration model from VRAM into RAM after processing + * @default false + */ + face_restoration_unload?: unknown; + /** + * Show Warnings + * @description Show warnings in console. + * @default false + */ + show_warnings?: unknown; + /** + * Memmon Poll Rate + * @description VRAM usage polls per second during generation. + * @default 8 + */ + memmon_poll_rate?: number; + /** + * Samples Log Stdout + * @description Always print all generation info to standard output + * @default false + */ + samples_log_stdout?: unknown; + /** + * Multiple Tqdm + * @description Add a second progress bar to the console that shows progress for an entire job. + * @default true + */ + multiple_tqdm?: boolean; + /** + * Print Hypernet Extra + * @description Print extra hypernetwork information to console. + * @default false + */ + print_hypernet_extra?: unknown; + /** + * List Hidden Files + * @description Load models/files in hidden directories + * @default true + */ + list_hidden_files?: boolean; + /** + * Disable Mmap Load Safetensors + * @description Disable memmapping for loading .safetensors files. + * @default false + */ + disable_mmap_load_safetensors?: unknown; + /** + * Unload Models When Training + * @description Move VAE and CLIP to RAM when training if possible. Saves VRAM. 
+ * @default false + */ + unload_models_when_training?: unknown; + /** + * Pin Memory + * @description Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage. + * @default false + */ + pin_memory?: unknown; + /** + * Save Optimizer State + * @description Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file. + * @default false + */ + save_optimizer_state?: unknown; + /** + * Save Training Settings To Txt + * @description Save textual inversion and hypernet settings to a text file whenever training starts. + * @default true + */ + save_training_settings_to_txt?: boolean; + /** + * Dataset Filename Word Regex + * @description Filename word regex + * @default + */ + dataset_filename_word_regex?: unknown; + /** + * Dataset Filename Join String + * @description Filename join string + * @default + */ + dataset_filename_join_string?: string; + /** + * Training Image Repeats Per Epoch + * @description Number of repeats for a single input image per epoch; used only for displaying epoch number + * @default 1 + */ + training_image_repeats_per_epoch?: number; + /** + * Training Write Csv Every + * @description Save an csv containing the loss to log directory every N steps, 0 to disable + * @default 500 + */ + training_write_csv_every?: number; + /** + * Training Xattention Optimizations + * @description Use cross attention optimizations while training + * @default false + */ + training_xattention_optimizations?: unknown; + /** + * Training Enable Tensorboard + * @description Enable tensorboard logging. + * @default false + */ + training_enable_tensorboard?: unknown; + /** + * Training Tensorboard Save Images + * @description Save generated images within tensorboard. + * @default false + */ + training_tensorboard_save_images?: unknown; + /** + * Training Tensorboard Flush Every + * @description How often, in seconds, to flush the pending tensorboard events and summaries to disk. + * @default 120 + */ + training_tensorboard_flush_every?: number; + /** + * Sd Model Checkpoint + * @description Stable Diffusion checkpoint + */ + sd_model_checkpoint?: unknown; + /** + * Sd Checkpoint Cache + * @description Checkpoints to cache in RAM + * @default 0 + */ + sd_checkpoint_cache?: unknown; + /** + * Sd Vae Checkpoint Cache + * @description VAE Checkpoints to cache in RAM + * @default 0 + */ + sd_vae_checkpoint_cache?: unknown; + /** + * Sd Vae + * @description SD VAE + * @default Automatic + */ + sd_vae?: string; + /** + * Sd Vae As Default + * @description Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them + * @default true + */ + sd_vae_as_default?: boolean; + /** + * Sd Unet + * @description SD Unet + * @default Automatic + */ + sd_unet?: string; + /** + * Inpainting Mask Weight + * @description Inpainting conditioning mask strength + * @default 1 + */ + inpainting_mask_weight?: number; + /** + * Initial Noise Multiplier + * @description Noise multiplier for img2img + * @default 1 + */ + initial_noise_multiplier?: number; + /** + * Img2Img Color Correction + * @description Apply color correction to img2img results to match original colors. + * @default false + */ + img2img_color_correction?: unknown; + /** + * Img2Img Fix Steps + * @description With img2img, do exactly the amount of steps the slider specifies. 
+ * @default false + */ + img2img_fix_steps?: unknown; + /** + * Img2Img Background Color + * @description With img2img, fill image's transparent parts with this color. + * @default #ffffff + */ + img2img_background_color?: string; + /** + * Enable Quantization + * @description Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply. + * @default false + */ + enable_quantization?: unknown; + /** + * Enable Emphasis + * @description Enable emphasis + * @default true + */ + enable_emphasis?: boolean; + /** + * Enable Batch Seeds + * @description Make K-diffusion samplers produce same images in a batch as when making a single image + * @default true + */ + enable_batch_seeds?: boolean; + /** + * Comma Padding Backtrack + * @description Prompt word wrap length limit + * @default 20 + */ + comma_padding_backtrack?: number; + /** + * Clip Stop At Last Layers + * @description Clip skip + * @default 1 + */ + CLIP_stop_at_last_layers?: number; + /** + * Upcast Attn + * @description Upcast cross attention layer to float32 + * @default false + */ + upcast_attn?: unknown; + /** + * Auto Vae Precision + * @description Automaticlly revert VAE to 32-bit floats + * @default true + */ + auto_vae_precision?: boolean; + /** + * Randn Source + * @description Random number generator source. + * @default GPU + */ + randn_source?: string; + /** + * Sdxl Crop Top + * @description crop top coordinate + * @default 0 + */ + sdxl_crop_top?: unknown; + /** + * Sdxl Crop Left + * @description crop left coordinate + * @default 0 + */ + sdxl_crop_left?: unknown; + /** + * Sdxl Refiner Low Aesthetic Score + * @description SDXL low aesthetic score + * @default 2.5 + */ + sdxl_refiner_low_aesthetic_score?: number; + /** + * Sdxl Refiner High Aesthetic Score + * @description SDXL high aesthetic score + * @default 6 + */ + sdxl_refiner_high_aesthetic_score?: number; + /** + * Cross Attention Optimization + * @description Cross attention optimization + * @default Automatic + */ + cross_attention_optimization?: string; + /** + * S Min Uncond + * @description Negative Guidance minimum sigma + * @default 0 + */ + s_min_uncond?: unknown; + /** + * Token Merging Ratio + * @description Token merging ratio + * @default 0 + */ + token_merging_ratio?: unknown; + /** + * Token Merging Ratio Img2Img + * @description Token merging ratio for img2img + * @default 0 + */ + token_merging_ratio_img2img?: unknown; + /** + * Token Merging Ratio Hr + * @description Token merging ratio for high-res pass + * @default 0 + */ + token_merging_ratio_hr?: unknown; + /** + * Pad Cond Uncond + * @description Pad prompt/negative prompt to be same length + * @default false + */ + pad_cond_uncond?: unknown; + /** + * Experimental Persistent Cond Cache + * @description persistent cond cache + * @default false + */ + experimental_persistent_cond_cache?: unknown; + /** + * Use Old Emphasis Implementation + * @description Use old emphasis implementation. Can be useful to reproduce old seeds. + * @default false + */ + use_old_emphasis_implementation?: unknown; + /** + * Use Old Karras Scheduler Sigmas + * @description Use old karras scheduler sigmas (0.1 to 10). + * @default false + */ + use_old_karras_scheduler_sigmas?: unknown; + /** + * No Dpmpp Sde Batch Determinism + * @description Do not make DPM++ SDE deterministic across different batch sizes. 
+ * @default false + */ + no_dpmpp_sde_batch_determinism?: unknown; + /** + * Use Old Hires Fix Width Height + * @description For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to). + * @default false + */ + use_old_hires_fix_width_height?: unknown; + /** + * Dont Fix Second Order Samplers Schedule + * @description Do not fix prompt schedule for second order samplers. + * @default false + */ + dont_fix_second_order_samplers_schedule?: unknown; + /** + * Hires Fix Use Firstpass Conds + * @description For hires fix, calculate conds of second pass using extra networks of first pass. + * @default false + */ + hires_fix_use_firstpass_conds?: unknown; + /** + * Interrogate Keep Models In Memory + * @description Keep models in VRAM + * @default false + */ + interrogate_keep_models_in_memory?: unknown; + /** + * Interrogate Return Ranks + * @description Include ranks of model tags matches in results. + * @default false + */ + interrogate_return_ranks?: unknown; + /** + * Interrogate Clip Num Beams + * @description BLIP: num_beams + * @default 1 + */ + interrogate_clip_num_beams?: number; + /** + * Interrogate Clip Min Length + * @description BLIP: minimum description length + * @default 24 + */ + interrogate_clip_min_length?: number; + /** + * Interrogate Clip Max Length + * @description BLIP: maximum description length + * @default 48 + */ + interrogate_clip_max_length?: number; + /** + * Interrogate Clip Dict Limit + * @description CLIP: maximum number of lines in text file + * @default 1500 + */ + interrogate_clip_dict_limit?: number; + /** + * Interrogate Clip Skip Categories + * @description CLIP: skip inquire categories + * @default [] + */ + interrogate_clip_skip_categories?: unknown; + /** + * Interrogate Deepbooru Score Threshold + * @description deepbooru: score threshold + * @default 0.5 + */ + interrogate_deepbooru_score_threshold?: number; + /** + * Deepbooru Sort Alpha + * @description deepbooru: sort tags alphabetically + * @default true + */ + deepbooru_sort_alpha?: boolean; + /** + * Deepbooru Use Spaces + * @description deepbooru: use spaces in tags + * @default true + */ + deepbooru_use_spaces?: boolean; + /** + * Deepbooru Escape + * @description deepbooru: escape (\) brackets + * @default true + */ + deepbooru_escape?: boolean; + /** + * Deepbooru Filter Tags + * @description deepbooru: filter out those tags + * @default + */ + deepbooru_filter_tags?: unknown; + /** + * Extra Networks Show Hidden Directories + * @description Show hidden directories + * @default true + */ + extra_networks_show_hidden_directories?: boolean; + /** + * Extra Networks Hidden Models + * @description Show cards for models in hidden directories + * @default When searched + */ + extra_networks_hidden_models?: string; + /** + * Extra Networks Default Multiplier + * @description Default multiplier for extra networks + * @default 1 + */ + extra_networks_default_multiplier?: number; + /** + * Extra Networks Card Width + * @description Card width for Extra Networks + * @default 0 + */ + extra_networks_card_width?: unknown; + /** + * Extra Networks Card Height + * @description Card height for Extra Networks + * @default 0 + */ + extra_networks_card_height?: unknown; + /** + * Extra Networks Card Text Scale + * @description Card text scale + * @default 1 + */ + extra_networks_card_text_scale?: number; + /** + * Extra Networks Card Show Desc + * @description Show description on card + * @default true + */ + 
extra_networks_card_show_desc?: boolean; + /** + * Extra Networks Add Text Separator + * @description Extra networks separator + * @default + */ + extra_networks_add_text_separator?: string; + /** + * Ui Extra Networks Tab Reorder + * @description Extra networks tab order + * @default + */ + ui_extra_networks_tab_reorder?: unknown; + /** + * Textual Inversion Print At Load + * @description Print a list of Textual Inversion embeddings when loading model + * @default false + */ + textual_inversion_print_at_load?: unknown; + /** + * Textual Inversion Add Hashes To Infotext + * @description Add Textual Inversion hashes to infotext + * @default true + */ + textual_inversion_add_hashes_to_infotext?: boolean; + /** + * Sd Hypernetwork + * @description Add hypernetwork to prompt + * @default None + */ + sd_hypernetwork?: string; + /** + * Localization + * @description Localization + * @default None + */ + localization?: string; + /** + * Gradio Theme + * @description Gradio theme + * @default Default + */ + gradio_theme?: string; + /** + * Img2Img Editor Height + * @description img2img: height of image editor + * @default 720 + */ + img2img_editor_height?: number; + /** + * Return Grid + * @description Show grid in results for web + * @default true + */ + return_grid?: boolean; + /** + * Return Mask + * @description For inpainting, include the greyscale mask in results for web + * @default false + */ + return_mask?: unknown; + /** + * Return Mask Composite + * @description For inpainting, include masked composite in results for web + * @default false + */ + return_mask_composite?: unknown; + /** + * Do Not Show Images + * @description Do not show any images in results for web + * @default false + */ + do_not_show_images?: unknown; + /** + * Send Seed + * @description Send seed when sending prompt or image to other interface + * @default true + */ + send_seed?: boolean; + /** + * Send Size + * @description Send size when sending prompt or image to another interface + * @default true + */ + send_size?: boolean; + /** + * Js Modal Lightbox + * @description Enable full page image viewer + * @default true + */ + js_modal_lightbox?: boolean; + /** + * Js Modal Lightbox Initially Zoomed + * @description Show images zoomed in by default in full page image viewer + * @default true + */ + js_modal_lightbox_initially_zoomed?: boolean; + /** + * Js Modal Lightbox Gamepad + * @description Navigate image viewer with gamepad + * @default false + */ + js_modal_lightbox_gamepad?: unknown; + /** + * Js Modal Lightbox Gamepad Repeat + * @description Gamepad repeat period, in milliseconds + * @default 250 + */ + js_modal_lightbox_gamepad_repeat?: number; + /** + * Show Progress In Title + * @description Show generation progress in window title. 
+ * @default true + */ + show_progress_in_title?: boolean; + /** + * Samplers In Dropdown + * @description Use dropdown for sampler selection instead of radio group + * @default true + */ + samplers_in_dropdown?: boolean; + /** + * Dimensions And Batch Together + * @description Show Width/Height and Batch sliders in same row + * @default true + */ + dimensions_and_batch_together?: boolean; + /** + * Keyedit Precision Attention + * @description Ctrl+up/down precision when editing (attention:1.1) + * @default 0.1 + */ + keyedit_precision_attention?: number; + /** + * Keyedit Precision Extra + * @description Ctrl+up/down precision when editing + * @default 0.05 + */ + keyedit_precision_extra?: number; + /** + * Keyedit Delimiters + * @description Ctrl+up/down word delimiters + * @default .,\/!?%^*;:{}=`~() + */ + keyedit_delimiters?: string; + /** + * Keyedit Move + * @description Alt+left/right moves prompt elements + * @default true + */ + keyedit_move?: boolean; + /** + * Quicksettings List + * @description Quicksettings list + * @default [ + * "sd_model_checkpoint" + * ] + */ + quicksettings_list?: unknown[]; + /** + * Ui Tab Order + * @description UI tab order + * @default [] + */ + ui_tab_order?: unknown; + /** + * Hidden Tabs + * @description Hidden UI tabs + * @default [] + */ + hidden_tabs?: unknown; + /** + * Ui Reorder List + * @description txt2img/img2img UI item order + * @default [] + */ + ui_reorder_list?: unknown; + /** + * Hires Fix Show Sampler + * @description Hires fix: show hires sampler selection + * @default false + */ + hires_fix_show_sampler?: unknown; + /** + * Hires Fix Show Prompts + * @description Hires fix: show hires prompt and negative prompt + * @default false + */ + hires_fix_show_prompts?: unknown; + /** + * Disable Token Counters + * @description Disable prompt token counters + * @default false + */ + disable_token_counters?: unknown; + /** + * Add Model Hash To Info + * @description Add model hash to generation information + * @default true + */ + add_model_hash_to_info?: boolean; + /** + * Add Model Name To Info + * @description Add model name to generation information + * @default true + */ + add_model_name_to_info?: boolean; + /** + * Add User Name To Info + * @description Add user name to generation information when authenticated + * @default false + */ + add_user_name_to_info?: unknown; + /** + * Add Version To Infotext + * @description Add program version to generation information + * @default true + */ + add_version_to_infotext?: boolean; + /** + * Disable Weights Auto Swap + * @description Disregard checkpoint information from pasted infotext + * @default true + */ + disable_weights_auto_swap?: boolean; + /** + * Infotext Styles + * @description Infer styles from prompts of pasted infotext + * @default Apply if any + */ + infotext_styles?: string; + /** + * Show Progressbar + * @description Show progressbar + * @default true + */ + show_progressbar?: boolean; + /** + * Live Previews Enable + * @description Show live previews of the created image + * @default true + */ + live_previews_enable?: boolean; + /** + * Live Previews Image Format + * @description Live preview file format + * @default png + */ + live_previews_image_format?: string; + /** + * Show Progress Grid + * @description Show previews of all images generated in a batch as a grid + * @default true + */ + show_progress_grid?: boolean; + /** + * Show Progress Every N Steps + * @description Live preview display period + * @default 10 + */ + show_progress_every_n_steps?: number; + /** + * 
Show Progress Type + * @description Live preview method + * @default Approx NN + */ + show_progress_type?: string; + /** + * Live Preview Content + * @description Live preview subject + * @default Prompt + */ + live_preview_content?: string; + /** + * Live Preview Refresh Period + * @description Progressbar and preview update period + * @default 1000 + */ + live_preview_refresh_period?: number; + /** + * Hide Samplers + * @description Hide samplers in user interface + * @default [] + */ + hide_samplers?: unknown; + /** + * Eta Ddim + * @description Eta for DDIM + * @default 0 + */ + eta_ddim?: unknown; + /** + * Eta Ancestral + * @description Eta for ancestral samplers + * @default 1 + */ + eta_ancestral?: number; + /** + * Ddim Discretize + * @description img2img DDIM discretize + * @default uniform + */ + ddim_discretize?: string; + /** + * S Churn + * @description sigma churn + * @default 0 + */ + s_churn?: unknown; + /** + * S Tmin + * @description sigma tmin + * @default 0 + */ + s_tmin?: unknown; + /** + * S Noise + * @description sigma noise + * @default 1 + */ + s_noise?: number; + /** + * K Sched Type + * @description scheduler type + * @default Automatic + */ + k_sched_type?: string; + /** + * Sigma Min + * @description sigma min + * @default 0 + */ + sigma_min?: unknown; + /** + * Sigma Max + * @description sigma max + * @default 0 + */ + sigma_max?: unknown; + /** + * Rho + * @description rho + * @default 0 + */ + rho?: unknown; + /** + * Eta Noise Seed Delta + * @description Eta noise seed delta + * @default 0 + */ + eta_noise_seed_delta?: unknown; + /** + * Always Discard Next To Last Sigma + * @description Always discard next-to-last sigma + * @default false + */ + always_discard_next_to_last_sigma?: unknown; + /** + * Uni Pc Variant + * @description UniPC variant + * @default bh1 + */ + uni_pc_variant?: string; + /** + * Uni Pc Skip Type + * @description UniPC skip type + * @default time_uniform + */ + uni_pc_skip_type?: string; + /** + * Uni Pc Order + * @description UniPC order + * @default 3 + */ + uni_pc_order?: number; + /** + * Uni Pc Lower Order Final + * @description UniPC lower order final + * @default true + */ + uni_pc_lower_order_final?: boolean; + /** + * Postprocessing Enable In Main Ui + * @description Enable postprocessing operations in txt2img and img2img tabs + * @default [] + */ + postprocessing_enable_in_main_ui?: unknown; + /** + * Postprocessing Operation Order + * @description Postprocessing operation order + * @default [] + */ + postprocessing_operation_order?: unknown; + /** + * Upscaling Max Images In Cache + * @description Maximum number of images in upscaling cache + * @default 5 + */ + upscaling_max_images_in_cache?: number; + /** + * Disabled Extensions + * @description Disable these extensions + * @default [] + */ + disabled_extensions?: unknown; + /** + * Disable All Extensions + * @description Disable all extensions (preserves the list of disabled extensions) + * @default none + */ + disable_all_extensions?: string; + /** + * Restore Config State File + * @description Config state file to restore from, under 'config-states/' folder + * @default + */ + restore_config_state_file?: unknown; + /** + * Sd Checkpoint Hash + * @description SHA256 hash of the current checkpoint + * @default + */ + sd_checkpoint_hash?: unknown; + /** + * Sd Lora + * @description Add network to prompt + * @default None + */ + sd_lora?: string; + /** + * Lora Preferred Name + * @description When adding to prompt, refer to Lora by + * @default Alias from file + */ + 
lora_preferred_name?: string; + /** + * Lora Add Hashes To Infotext + * @description Add Lora hashes to infotext + * @default true + */ + lora_add_hashes_to_infotext?: boolean; + /** + * Lora Show All + * @description Always show all networks on the Lora page + * @default false + */ + lora_show_all?: unknown; + /** + * Lora Hide Unknown For Versions + * @description Hide networks of unknown versions for model versions + * @default [] + */ + lora_hide_unknown_for_versions?: unknown; + /** + * Lora Functional + * @description Lora/Networks: use old method that takes longer when you have multiple Loras active and produces same results as kohya-ss/sd-webui-additional-networks extension + * @default false + */ + lora_functional?: unknown; + /** + * Canvas Hotkey Zoom + * @description Zoom canvas + * @default Alt + */ + canvas_hotkey_zoom?: string; + /** + * Canvas Hotkey Adjust + * @description Adjust brush size + * @default Ctrl + */ + canvas_hotkey_adjust?: string; + /** + * Canvas Hotkey Move + * @description Moving the canvas + * @default F + */ + canvas_hotkey_move?: string; + /** + * Canvas Hotkey Fullscreen + * @description Fullscreen Mode, maximizes the picture so that it fits into the screen and stretches it to its full width + * @default S + */ + canvas_hotkey_fullscreen?: string; + /** + * Canvas Hotkey Reset + * @description Reset zoom and canvas positon + * @default R + */ + canvas_hotkey_reset?: string; + /** + * Canvas Hotkey Overlap + * @description Toggle overlap + * @default O + */ + canvas_hotkey_overlap?: string; + /** + * Canvas Show Tooltip + * @description Enable tooltip on the canvas + * @default true + */ + canvas_show_tooltip?: boolean; + /** + * Canvas Blur Prompt + * @description Take the focus off the prompt when working with a canvas + * @default false + */ + canvas_blur_prompt?: unknown; + /** + * Canvas Disabled Functions + * @description Disable function that you don't use + * @default [ + * "Overlap" + * ] + */ + canvas_disabled_functions?: unknown[]; + /** + * Extra Options + * @description Options in main UI + * @default [] + */ + extra_options?: unknown; + /** + * Extra Options Accordion + * @description Place options in main UI into an accordion + * @default false + */ + extra_options_accordion?: unknown; + }; + /** PNGInfoRequest */ + PNGInfoRequest: { + /** + * Image + * @description The base64 encoded PNG image + */ + image: string; + }; + /** PNGInfoResponse */ + PNGInfoResponse: { + /** + * Image info + * @description A string with the parameters used to generate the image + */ + info: string; + /** + * Items + * @description An object containing all the info the image had + */ + items: Record; + }; + /** PreprocessResponse */ + PreprocessResponse: { + /** + * Preprocess info + * @description Response string from preprocessing task. + */ + info: string; + }; + /** ProgressResponse */ + ProgressResponse: { + /** + * Progress + * @description The progress with a range of 0 to 1 + */ + progress: number; + /** ETA in secs */ + eta_relative: number; + /** + * State + * @description The current state snapshot + */ + state: Record; + /** + * Current image + * @description The current image in base64 format. opts.show_progress_every_n_steps is required for this to work. + */ + current_image?: string; + /** + * Info text + * @description Info text used by WebUI. 
+ */ + textinfo?: string; + }; + /** PromptStyleItem */ + PromptStyleItem: { + /** Name */ + name: string; + /** Prompt */ + prompt?: string; + /** Negative Prompt */ + negative_prompt?: string; + }; + /** RealesrganItem */ + RealesrganItem: { + /** Name */ + name: string; + /** Path */ + path?: string; + /** Scale */ + scale?: number; + }; + /** SDModelItem */ + SDModelItem: { + /** Title */ + title: string; + /** Model Name */ + model_name: string; + /** Short hash */ + hash?: string; + /** sha256 hash */ + sha256?: string; + /** Filename */ + filename: string; + /** Config file */ + config?: string; + }; + /** SDVaeItem */ + SDVaeItem: { + /** Model Name */ + model_name: string; + /** Filename */ + filename: string; + }; + /** SamplerItem */ + SamplerItem: { + /** Name */ + name: string; + /** Aliases */ + aliases: string[]; + /** Options */ + options: { + [key: string]: string; + }; + }; + /** ScriptArg */ + ScriptArg: { + /** + * Label + * @description Name of the argument in UI + */ + label?: string; + /** + * Value + * @description Default value of the argument + */ + value?: unknown; + /** + * Minimum + * @description Minimum allowed value for the argumentin UI + */ + minimum?: unknown; + /** + * Minimum + * @description Maximum allowed value for the argumentin UI + */ + maximum?: unknown; + /** + * Minimum + * @description Step for changing value of the argumentin UI + */ + step?: unknown; + /** + * Choices + * @description Possible values for the argument + */ + choices?: string[]; + }; + /** ScriptInfo */ + ScriptInfo: { + /** + * Name + * @description Script name + */ + name?: string; + /** + * IsAlwayson + * @description Flag specifying whether this script is an alwayson script + */ + is_alwayson?: boolean; + /** + * IsImg2img + * @description Flag specifying whether this script is an img2img script + */ + is_img2img?: boolean; + /** + * Arguments + * @description List of script's arguments + */ + args: components["schemas"]["ScriptArg"][]; + }; + /** ScriptsList */ + ScriptsList: { + /** + * Txt2img + * @description Titles of scripts (txt2img) + */ + txt2img?: unknown[]; + /** + * Img2img + * @description Titles of scripts (img2img) + */ + img2img?: unknown[]; + }; + /** StableDiffusionProcessingImg2Img */ + StableDiffusionProcessingImg2Img: { + /** Init Images */ + init_images?: unknown[]; + /** + * Resize Mode + * @default 0 + */ + resize_mode?: number; + /** + * Denoising Strength + * @default 0.75 + */ + denoising_strength?: number; + /** Image Cfg Scale */ + image_cfg_scale?: number; + /** Mask */ + mask?: string; + /** Mask Blur */ + mask_blur?: number; + /** + * Mask Blur X + * @default 4 + */ + mask_blur_x?: number; + /** + * Mask Blur Y + * @default 4 + */ + mask_blur_y?: number; + /** + * Inpainting Fill + * @default 0 + */ + inpainting_fill?: number; + /** + * Inpaint Full Res + * @default true + */ + inpaint_full_res?: boolean; + /** + * Inpaint Full Res Padding + * @default 0 + */ + inpaint_full_res_padding?: number; + /** + * Inpainting Mask Invert + * @default 0 + */ + inpainting_mask_invert?: number; + /** Initial Noise Multiplier */ + initial_noise_multiplier?: number; + /** + * Prompt + * @default + */ + prompt?: string; + /** Styles */ + styles?: string[]; + /** + * Seed + * @default -1 + */ + seed?: number; + /** + * Subseed + * @default -1 + */ + subseed?: number; + /** + * Subseed Strength + * @default 0 + */ + subseed_strength?: number; + /** + * Seed Resize From H + * @default -1 + */ + seed_resize_from_h?: number; + /** + * Seed Resize From W + * 
@default -1 + */ + seed_resize_from_w?: number; + /** Sampler Name */ + sampler_name?: string; + /** + * Batch Size + * @default 1 + */ + batch_size?: number; + /** + * N Iter + * @default 1 + */ + n_iter?: number; + /** + * Steps + * @default 50 + */ + steps?: number; + /** + * Cfg Scale + * @default 7 + */ + cfg_scale?: number; + /** + * Width + * @default 512 + */ + width?: number; + /** + * Height + * @default 512 + */ + height?: number; + /** + * Restore Faces + * @default false + */ + restore_faces?: boolean; + /** + * Tiling + * @default false + */ + tiling?: boolean; + /** + * Do Not Save Samples + * @default false + */ + do_not_save_samples?: boolean; + /** + * Do Not Save Grid + * @default false + */ + do_not_save_grid?: boolean; + /** Negative Prompt */ + negative_prompt?: string; + /** Eta */ + eta?: number; + /** + * S Min Uncond + * @default 0 + */ + s_min_uncond?: number; + /** + * S Churn + * @default 0 + */ + s_churn?: number; + /** S Tmax */ + s_tmax?: number; + /** + * S Tmin + * @default 0 + */ + s_tmin?: number; + /** + * S Noise + * @default 1 + */ + s_noise?: number; + /** Override Settings */ + override_settings?: Record; + /** + * Override Settings Restore Afterwards + * @default true + */ + override_settings_restore_afterwards?: boolean; + /** + * Script Args + * @default [] + */ + script_args?: unknown[]; + /** + * Sampler Index + * @default Euler + */ + sampler_index?: string; + /** + * Include Init Images + * @default false + */ + include_init_images?: boolean; + /** Script Name */ + script_name?: string; + /** + * Send Images + * @default true + */ + send_images?: boolean; + /** + * Save Images + * @default false + */ + save_images?: boolean; + /** + * Alwayson Scripts + * @default {} + */ + alwayson_scripts?: Record; + }; + /** StableDiffusionProcessingTxt2Img */ + StableDiffusionProcessingTxt2Img: { + /** + * Enable Hr + * @default false + */ + enable_hr?: boolean; + /** + * Denoising Strength + * @default 0 + */ + denoising_strength?: number; + /** + * Firstphase Width + * @default 0 + */ + firstphase_width?: number; + /** + * Firstphase Height + * @default 0 + */ + firstphase_height?: number; + /** + * Hr Scale + * @default 2 + */ + hr_scale?: number; + /** Hr Upscaler */ + hr_upscaler?: string; + /** + * Hr Second Pass Steps + * @default 0 + */ + hr_second_pass_steps?: number; + /** + * Hr Resize X + * @default 0 + */ + hr_resize_x?: number; + /** + * Hr Resize Y + * @default 0 + */ + hr_resize_y?: number; + /** Hr Sampler Name */ + hr_sampler_name?: string; + /** + * Hr Prompt + * @default + */ + hr_prompt?: string; + /** + * Hr Negative Prompt + * @default + */ + hr_negative_prompt?: string; + /** + * Prompt + * @default + */ + prompt?: string; + /** Styles */ + styles?: string[]; + /** + * Seed + * @default -1 + */ + seed?: number; + /** + * Subseed + * @default -1 + */ + subseed?: number; + /** + * Subseed Strength + * @default 0 + */ + subseed_strength?: number; + /** + * Seed Resize From H + * @default -1 + */ + seed_resize_from_h?: number; + /** + * Seed Resize From W + * @default -1 + */ + seed_resize_from_w?: number; + /** Sampler Name */ + sampler_name?: string; + /** + * Batch Size + * @default 1 + */ + batch_size?: number; + /** + * N Iter + * @default 1 + */ + n_iter?: number; + /** + * Steps + * @default 50 + */ + steps?: number; + /** + * Cfg Scale + * @default 7 + */ + cfg_scale?: number; + /** + * Width + * @default 512 + */ + width?: number; + /** + * Height + * @default 512 + */ + height?: number; + /** + * Restore Faces + * @default 
false + */ + restore_faces?: boolean; + /** + * Tiling + * @default false + */ + tiling?: boolean; + /** + * Do Not Save Samples + * @default false + */ + do_not_save_samples?: boolean; + /** + * Do Not Save Grid + * @default false + */ + do_not_save_grid?: boolean; + /** Negative Prompt */ + negative_prompt?: string; + /** Eta */ + eta?: number; + /** + * S Min Uncond + * @default 0 + */ + s_min_uncond?: number; + /** + * S Churn + * @default 0 + */ + s_churn?: number; + /** S Tmax */ + s_tmax?: number; + /** + * S Tmin + * @default 0 + */ + s_tmin?: number; + /** + * S Noise + * @default 1 + */ + s_noise?: number; + /** Override Settings */ + override_settings?: Record; + /** + * Override Settings Restore Afterwards + * @default true + */ + override_settings_restore_afterwards?: boolean; + /** + * Script Args + * @default [] + */ + script_args?: unknown[]; + /** + * Sampler Index + * @default Euler + */ + sampler_index?: string; + /** Script Name */ + script_name?: string; + /** + * Send Images + * @default true + */ + send_images?: boolean; + /** + * Save Images + * @default false + */ + save_images?: boolean; + /** + * Alwayson Scripts + * @default {} + */ + alwayson_scripts?: Record; + }; + /** TextToImageResponse */ + TextToImageResponse: { + /** + * Image + * @description The generated image in base64 format. + */ + images?: string[]; + /** Parameters */ + parameters: Record; + /** Info */ + info: string; + }; + /** TrainResponse */ + TrainResponse: { + /** + * Train info + * @description Response string from train embedding or hypernetwork task. + */ + info: string; + }; + /** UpscalerItem */ + UpscalerItem: { + /** Name */ + name: string; + /** Model Name */ + model_name?: string; + /** Path */ + model_path?: string; + /** URL */ + model_url?: string; + /** Scale */ + scale?: number; + }; + /** ValidationError */ + ValidationError: { + /** Location */ + loc: (string | number)[]; + /** Message */ + msg: string; + /** Error Type */ + type: string; + }; + }; + responses: never; + parameters: never; + requestBodies: never; + headers: never; + pathItems: never; } -interface SdRequest { - prompt: string; - denoising_strength: number; - styles: string[]; - negative_prompt: string; - seed: number; - subseed: number; - subseed_strength: number; - seed_resize_from_h: number; - seed_resize_from_w: number; - width: number; - height: number; - sampler_name: string; - batch_size: number; - n_iter: number; - steps: number; - cfg_scale: number; - restore_faces: boolean; - tiling: boolean; - do_not_save_samples: boolean; - do_not_save_grid: boolean; - eta: number; - s_min_uncond: number; - s_churn: number; - s_tmax: number; - s_tmin: number; - s_noise: number; - override_settings: object; - override_settings_restore_afterwards: boolean; - script_args: unknown[]; - sampler_index: string; - script_name: string; - send_images: boolean; - save_images: boolean; - alwayson_scripts: object; -} - -export async function sdTxt2Img( - api: SdApi, - params: Partial, - onProgress?: (progress: SdProgressResponse) => void, -): Promise> { - const request = fetchSdApi>( - api, - "sdapi/v1/txt2img", - { body: params }, - ) - // JSON field "info" is a JSON-serialized string so we need to parse this part second time - .then((data) => ({ - ...data, - info: typeof data.info === "string" ? 
JSON.parse(data.info) : data.info, - })); - - try { - while (true) { - await Promise.race([request, Async.delay(3000)]); - if (await AsyncX.promiseState(request) !== "pending") return await request; - onProgress?.( - await fetchSdApi(api, "sdapi/v1/progress", { timeoutMs: 10_000 }), - ); - } - } finally { - if (await AsyncX.promiseState(request) === "pending") { - await fetchSdApi(api, "sdapi/v1/interrupt", { body: {}, timeoutMs: 10_000 }); - } - } -} - -export interface SdTxt2ImgRequest extends SdRequest { - enable_hr: boolean; - firstphase_height: number; - firstphase_width: number; - hr_resize_x: number; - hr_negative_prompt: string; - hr_prompt: string; - hr_resize_y: number; - hr_sampler_name: string; - hr_scale: number; - hr_second_pass_steps: number; - hr_upscaler: string; -} - -export async function sdImg2Img( - api: SdApi, - params: Partial, - onProgress?: (progress: SdProgressResponse) => void, -): Promise> { - const request = fetchSdApi>( - api, - "sdapi/v1/img2img", - { body: params }, - ) - // JSON field "info" is a JSON-serialized string so we need to parse this part second time - .then((data) => ({ - ...data, - info: typeof data.info === "string" ? JSON.parse(data.info) : data.info, - })); - - try { - while (true) { - await Promise.race([request, Async.delay(3000)]); - if (await AsyncX.promiseState(request) !== "pending") return await request; - onProgress?.( - await fetchSdApi(api, "sdapi/v1/progress", { timeoutMs: 10_000 }), - ); - } - } finally { - if (await AsyncX.promiseState(request) === "pending") { - await fetchSdApi(api, "sdapi/v1/interrupt", { body: {}, timeoutMs: 10_000 }); - } - } -} - -export interface SdImg2ImgRequest extends SdRequest { - image_cfg_scale: number; - include_init_images: boolean; - init_images: string[]; - initial_noise_multiplier: number; - inpaint_full_res: boolean; - inpaint_full_res_padding: number; - inpainting_fill: number; - inpainting_mask_invert: number; - mask: string; - mask_blur: number; - mask_blur_x: number; - mask_blur_y: number; - resize_mode: number; -} - -export interface SdResponse { - images: string[]; - parameters: T; - // Warning: raw response from API is a JSON-serialized string - info: SdTxt2ImgInfo; -} - -export interface SdTxt2ImgInfo { - prompt: string; - all_prompts: string[]; - negative_prompt: string; - all_negative_prompts: string[]; - seed: number; - all_seeds: number[]; - subseed: number; - all_subseeds: number[]; - subseed_strength: number; - width: number; - height: number; - sampler_name: string; - cfg_scale: number; - steps: number; - batch_size: number; - restore_faces: boolean; - face_restoration_model: unknown; - sd_model_hash: string; - seed_resize_from_w: number; - seed_resize_from_h: number; - denoising_strength: number; - extra_generation_params: SdTxt2ImgInfoExtraParams; - index_of_first_image: number; - infotexts: string[]; - styles: unknown[]; - job_timestamp: string; - clip_skip: number; - is_using_inpainting_conditioning: boolean; -} - -export interface SdTxt2ImgInfoExtraParams { - "Lora hashes": string; - "TI hashes": string; -} - -export interface SdProgressResponse { - progress: number; - eta_relative: number; - state: SdProgressState; - /** base64 encoded preview */ - current_image: string | null; - textinfo: string | null; -} - -export interface SdProgressState { - skipped: boolean; - interrupted: boolean; - job: string; - job_count: number; - job_timestamp: string; - job_no: number; - sampling_step: number; - sampling_steps: number; -} - -export function sdGetConfig(api: SdApi): Promise { - 
return fetchSdApi(api, "config", { timeoutMs: 10_000 }); -} - -export interface SdConfigResponse { - /** version with new line at the end for some reason */ - version: string; - mode: string; - dev_mode: boolean; - analytics_enabled: boolean; - components: object[]; - css: unknown; - title: string; - is_space: boolean; - enable_queue: boolean; - show_error: boolean; - show_api: boolean; - is_colab: boolean; - stylesheets: unknown[]; - theme: string; - layout: object; - dependencies: object[]; - root: string; -} - -export interface SdErrorResponse { - /** - * The HTTP status message or array of invalid fields. - * Can also be empty string. - */ - detail: string | Array<{ loc: string[]; msg: string; type: string }>; - /** Can be e.g. "OutOfMemoryError" or undefined. */ - error?: string; - /** Empty string. */ - body?: string; - /** Long description of error. */ - errors?: string; -} - -export class SdApiError extends Error { - constructor( - public readonly endpoint: string, - public readonly options: RequestInit | undefined, - public readonly statusCode: number, - public readonly statusText: string, - public readonly response?: SdErrorResponse, - ) { - let message = `${options?.method ?? "GET"} ${endpoint} : ${statusCode} ${statusText}`; - if (response?.error) { - message += `: ${response.error}`; - if (response.errors) message += ` - ${response.errors}`; - } else if (typeof response?.detail === "string" && response.detail.length > 0) { - message += `: ${response.detail}`; - } else if (response?.detail) { - message += `: ${JSON.stringify(response.detail)}`; - } - super(message); - } +export type $defs = Record; + +export type external = Record; + +export interface operations { + /** Text2Imgapi */ + text2imgapi_sdapi_v1_txt2img_post: { + requestBody: { + content: { + "application/json": components["schemas"]["StableDiffusionProcessingTxt2Img"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["TextToImageResponse"]; + }; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + /** Img2Imgapi */ + img2imgapi_sdapi_v1_img2img_post: { + requestBody: { + content: { + "application/json": components["schemas"]["StableDiffusionProcessingImg2Img"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["ImageToImageResponse"]; + }; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + /** Extras Single Image Api */ + extras_single_image_api_sdapi_v1_extra_single_image_post: { + requestBody: { + content: { + "application/json": components["schemas"]["ExtrasSingleImageRequest"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["ExtrasSingleImageResponse"]; + }; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + /** Extras Batch Images Api */ + extras_batch_images_api_sdapi_v1_extra_batch_images_post: { + requestBody: { + content: { + "application/json": components["schemas"]["ExtrasBatchImagesRequest"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": 
components["schemas"]["ExtrasBatchImagesResponse"]; + }; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + /** Pnginfoapi */ + pnginfoapi_sdapi_v1_png_info_post: { + requestBody: { + content: { + "application/json": components["schemas"]["PNGInfoRequest"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["PNGInfoResponse"]; + }; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + /** Progressapi */ + progressapi_sdapi_v1_progress_get: { + parameters: { + query?: { + skip_current_image?: boolean; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["ProgressResponse"]; + }; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + /** Interrogateapi */ + interrogateapi_sdapi_v1_interrogate_post: { + requestBody: { + content: { + "application/json": components["schemas"]["InterrogateRequest"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": unknown; + }; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + /** Interruptapi */ + interruptapi_sdapi_v1_interrupt_post: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": unknown; + }; + }; + }; + }; + /** Skip */ + skip_sdapi_v1_skip_post: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": unknown; + }; + }; + }; + }; + /** Get Config */ + get_config_sdapi_v1_options_get: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["Options"]; + }; + }; + }; + }; + /** Set Config */ + set_config_sdapi_v1_options_post: { + requestBody: { + content: { + "application/json": Record; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": unknown; + }; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + /** Get Cmd Flags */ + get_cmd_flags_sdapi_v1_cmd_flags_get: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["Flags"]; + }; + }; + }; + }; + /** Get Samplers */ + get_samplers_sdapi_v1_samplers_get: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["SamplerItem"][]; + }; + }; + }; + }; + /** Get Upscalers */ + get_upscalers_sdapi_v1_upscalers_get: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["UpscalerItem"][]; + }; + }; + }; + }; + /** Get Latent Upscale Modes */ + get_latent_upscale_modes_sdapi_v1_latent_upscale_modes_get: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["LatentUpscalerModeItem"][]; + }; + }; + }; + }; + /** Get Sd Models */ + get_sd_models_sdapi_v1_sd_models_get: { + responses: { + /** 
@description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["SDModelItem"][]; + }; + }; + }; + }; + /** Get Sd Vaes */ + get_sd_vaes_sdapi_v1_sd_vae_get: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["SDVaeItem"][]; + }; + }; + }; + }; + /** Get Hypernetworks */ + get_hypernetworks_sdapi_v1_hypernetworks_get: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["HypernetworkItem"][]; + }; + }; + }; + }; + /** Get Face Restorers */ + get_face_restorers_sdapi_v1_face_restorers_get: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["FaceRestorerItem"][]; + }; + }; + }; + }; + /** Get Realesrgan Models */ + get_realesrgan_models_sdapi_v1_realesrgan_models_get: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["RealesrganItem"][]; + }; + }; + }; + }; + /** Get Prompt Styles */ + get_prompt_styles_sdapi_v1_prompt_styles_get: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["PromptStyleItem"][]; + }; + }; + }; + }; + /** Get Embeddings */ + get_embeddings_sdapi_v1_embeddings_get: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["EmbeddingsResponse"]; + }; + }; + }; + }; + /** Refresh Checkpoints */ + refresh_checkpoints_sdapi_v1_refresh_checkpoints_post: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": unknown; + }; + }; + }; + }; + /** Create Embedding */ + create_embedding_sdapi_v1_create_embedding_post: { + requestBody: { + content: { + "application/json": Record; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["CreateResponse"]; + }; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + /** Create Hypernetwork */ + create_hypernetwork_sdapi_v1_create_hypernetwork_post: { + requestBody: { + content: { + "application/json": Record; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["CreateResponse"]; + }; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + /** Preprocess */ + preprocess_sdapi_v1_preprocess_post: { + requestBody: { + content: { + "application/json": Record; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["PreprocessResponse"]; + }; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + /** Train Embedding */ + train_embedding_sdapi_v1_train_embedding_post: { + requestBody: { + content: { + "application/json": Record; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["TrainResponse"]; + }; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": 
components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + /** Train Hypernetwork */ + train_hypernetwork_sdapi_v1_train_hypernetwork_post: { + requestBody: { + content: { + "application/json": Record; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["TrainResponse"]; + }; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + /** Get Memory */ + get_memory_sdapi_v1_memory_get: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["MemoryResponse"]; + }; + }; + }; + }; + /** Unloadapi */ + unloadapi_sdapi_v1_unload_checkpoint_post: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": unknown; + }; + }; + }; + }; + /** Reloadapi */ + reloadapi_sdapi_v1_reload_checkpoint_post: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": unknown; + }; + }; + }; + }; + /** Get Scripts List */ + get_scripts_list_sdapi_v1_scripts_get: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["ScriptsList"]; + }; + }; + }; + }; + /** Get Script Info */ + get_script_info_sdapi_v1_script_info_get: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["ScriptInfo"][]; + }; + }; + }; + }; + /** Get Lora Info */ + get_lora_info_tacapi_v1_lora_info__lora_name__get: { + parameters: { + path: { + lora_name: unknown; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": unknown; + }; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + /** Get Lyco Info */ + get_lyco_info_tacapi_v1_lyco_info__lyco_name__get: { + parameters: { + path: { + lyco_name: unknown; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": unknown; + }; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + /** Get Thumb Preview */ + get_thumb_preview_tacapi_v1_thumb_preview__filename__get: { + parameters: { + query: { + type: unknown; + }; + path: { + filename: unknown; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": unknown; + }; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + /** Get Thumb Preview Blob */ + get_thumb_preview_blob_tacapi_v1_thumb_preview_blob__filename__get: { + parameters: { + query: { + type: unknown; + }; + path: { + filename: unknown; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": unknown; + }; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + /** Get Loras */ + get_loras_sdapi_v1_loras_get: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": unknown; + }; + }; + }; + }; + /** Refresh Loras */ + 
refresh_loras_sdapi_v1_refresh_loras_post: { + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": unknown; + }; + }; + }; + }; } diff --git a/common/utils.ts b/common/utils.ts deleted file mode 100644 index 3362e39..0000000 --- a/common/utils.ts +++ /dev/null @@ -1,60 +0,0 @@ -import { GrammyParseMode, GrammyTypes } from "../deps.ts"; - -export function formatOrdinal(n: number) { - if (n % 100 === 11 || n % 100 === 12 || n % 100 === 13) return `${n}th`; - if (n % 10 === 1) return `${n}st`; - if (n % 10 === 2) return `${n}nd`; - if (n % 10 === 3) return `${n}rd`; - return `${n}th`; -} - -export const fmt = ( - rawStringParts: TemplateStringsArray | GrammyParseMode.Stringable[], - ...stringLikes: GrammyParseMode.Stringable[] -): GrammyParseMode.FormattedString => { - let text = ""; - const entities: GrammyTypes.MessageEntity[] = []; - - const length = Math.max(rawStringParts.length, stringLikes.length); - for (let i = 0; i < length; i++) { - for (const stringLike of [rawStringParts[i], stringLikes[i]]) { - if (stringLike instanceof GrammyParseMode.FormattedString) { - entities.push( - ...stringLike.entities.map((e) => ({ - ...e, - offset: e.offset + text.length, - })), - ); - } - if (stringLike != null) text += stringLike.toString(); - } - } - return new GrammyParseMode.FormattedString(text, entities); -}; - -export function formatUserChat(ctx: { from?: GrammyTypes.User; chat?: GrammyTypes.Chat }) { - const msg: string[] = []; - if (ctx.from) { - msg.push(ctx.from.first_name); - if (ctx.from.last_name) msg.push(ctx.from.last_name); - if (ctx.from.username) msg.push(`(@${ctx.from.username})`); - if (ctx.from.language_code) msg.push(`(${ctx.from.language_code.toUpperCase()})`); - } - if (ctx.chat) { - if ( - ctx.chat.type === "group" || - ctx.chat.type === "supergroup" || - ctx.chat.type === "channel" - ) { - msg.push("in"); - msg.push(ctx.chat.title); - if ( - (ctx.chat.type === "supergroup" || ctx.chat.type === "channel") && - ctx.chat.username - ) { - msg.push(`(@${ctx.chat.username})`); - } - } - } - return msg.join(" "); -} diff --git a/db/config.ts b/db/config.ts new file mode 100644 index 0000000..8793d83 --- /dev/null +++ b/db/config.ts @@ -0,0 +1,53 @@ +import * as SdApi from "../common/sdApi.ts"; +import { db } from "./db.ts"; + +export interface ConfigData { + adminUsernames: string[]; + pausedReason: string | null; + maxUserJobs: number; + maxJobs: number; + defaultParams?: Partial< + | SdApi.components["schemas"]["StableDiffusionProcessingTxt2Img"] + | SdApi.components["schemas"]["StableDiffusionProcessingImg2Img"] + >; + sdInstances: SdInstanceData[]; +} + +export interface SdInstanceData { + id: string; + name?: string; + api: { url: string; auth?: string }; + maxResolution: number; +} + +const getDefaultConfig = (): ConfigData => ({ + adminUsernames: Deno.env.get("TG_ADMIN_USERS")?.split(",") ?? [], + pausedReason: null, + maxUserJobs: 3, + maxJobs: 20, + defaultParams: { + batch_size: 1, + n_iter: 1, + width: 512, + height: 768, + steps: 30, + cfg_scale: 10, + negative_prompt: "boring_e621_fluffyrock_v4 boring_e621_v4", + }, + sdInstances: [ + { + id: "local", + api: { url: Deno.env.get("SD_API_URL") ?? "http://127.0.0.1:7860/" }, + maxResolution: 1024 * 1024, + }, + ], +}); + +export async function getConfig(): Promise { + const configEntry = await db.get(["config"]); + return configEntry.value ?? 
getDefaultConfig(); +} + +export async function setConfig(config: ConfigData): Promise { + await db.set(["config"], config); +} diff --git a/db/jobStore.ts b/db/jobStore.ts index acc986d..bc988d2 100644 --- a/db/jobStore.ts +++ b/db/jobStore.ts @@ -1,9 +1,8 @@ import { GrammyTypes, IKV } from "../deps.ts"; -import { SdTxt2ImgInfo } from "../common/sdApi.ts"; import { PngInfo } from "../common/parsePngInfo.ts"; import { db } from "./db.ts"; -export interface JobSchema { +export interface GenerationSchema { task: | { type: "txt2img"; @@ -12,38 +11,50 @@ export interface JobSchema { | { type: "img2img"; params: Partial; - fileId: string; + fileId?: string; }; from: GrammyTypes.User; chat: GrammyTypes.Chat; - requestMessageId: number; - status: - | { - type: "waiting"; - message?: GrammyTypes.Message.TextMessage; - lastErrorDate?: Date; - } - | { - type: "processing"; - progress: number; - worker: string; - updatedDate: Date; - message?: GrammyTypes.Message.TextMessage; - } - | { - type: "done"; - info?: SdTxt2ImgInfo; - startDate?: Date; - endDate?: Date; - }; + requestMessageId?: number; + status: { + info?: SdGenerationInfo; + startDate?: Date; + endDate?: Date; + }; } -type JobIndices = { - "status.type": JobSchema["status"]["type"]; -}; +/** + * `info` field in generation response is a serialized json string of this shape. + */ +export interface SdGenerationInfo { + prompt: string; + all_prompts: string[]; + negative_prompt: string; + all_negative_prompts: string[]; + seed: number; + all_seeds: number[]; + subseed: number; + all_subseeds: number[]; + subseed_strength: number; + width: number; + height: number; + sampler_name: string; + cfg_scale: number; + steps: number; + batch_size: number; + restore_faces: boolean; + face_restoration_model: unknown; + sd_model_hash: string; + seed_resize_from_w: number; + seed_resize_from_h: number; + denoising_strength: number; + extra_generation_params: Record; + index_of_first_image: number; + infotexts: string[]; + styles: unknown[]; + job_timestamp: string; + clip_skip: number; + is_using_inpainting_conditioning: boolean; +} -export const jobStore = new IKV.Store(db, "job", { - indices: { - "status.type": { getValue: (job) => job.status.type }, - }, -}); +export const generationStore = new IKV.Store(db, "job", { indices: {} }); diff --git a/deps.ts b/deps.ts index d581cee..5b1851d 100644 --- a/deps.ts +++ b/deps.ts @@ -1,18 +1,20 @@ export * as Log from "https://deno.land/std@0.201.0/log/mod.ts"; export * as Async from "https://deno.land/std@0.201.0/async/mod.ts"; -export * as FmtDuration from "https://deno.land/std@0.201.0/fmt/duration.ts"; -export * as Collections from "https://deno.land/std@0.201.0/collections/mod.ts"; -export * as Base64 from "https://deno.land/std@0.201.0/encoding/base64.ts"; +export * as FmtDuration from "https://deno.land/std@0.202.0/fmt/duration.ts"; +export * as Collections from "https://deno.land/std@0.202.0/collections/mod.ts"; +export * as Base64 from "https://deno.land/std@0.202.0/encoding/base64.ts"; export * as AsyncX from "https://deno.land/x/async@v2.0.2/mod.ts"; export * as ULID from "https://deno.land/x/ulid@v0.3.0/mod.ts"; export * as IKV from "https://deno.land/x/indexed_kv@v0.3.0/mod.ts"; -export * as Grammy from "https://deno.land/x/grammy@v1.18.1/mod.ts"; -export * as GrammyTypes from "https://deno.land/x/grammy_types@v3.2.0/mod.ts"; +export * as KVMQ from "https://deno.land/x/kvmq@v0.1.0/mod.ts"; +export * as Grammy from "https://deno.land/x/grammy@v1.18.3/mod.ts"; +export * as GrammyTypes from 
"https://deno.land/x/grammy_types@v3.2.2/mod.ts"; export * as GrammyAutoQuote from "https://deno.land/x/grammy_autoquote@v1.1.2/mod.ts"; -export * as GrammyParseMode from "https://deno.land/x/grammy_parse_mode@1.7.1/mod.ts"; +export * as GrammyParseMode from "https://deno.land/x/grammy_parse_mode@1.8.1/mod.ts"; export * as GrammyKvStorage from "https://deno.land/x/grammy_storages@v2.3.1/denokv/src/mod.ts"; -export * as GrammyStatelessQ from "https://deno.land/x/grammy_stateless_question_alpha@v3.0.3/mod.ts"; +export * as GrammyStatelessQ from "https://deno.land/x/grammy_stateless_question_alpha@v3.0.4/mod.ts"; export * as GrammyFiles from "https://deno.land/x/grammy_files@v1.0.4/mod.ts"; export * as FileType from "https://esm.sh/file-type@18.5.0"; export { default as pngChunksExtract } from "https://esm.sh/png-chunks-extract@1.0.0"; export { decode as pngChunkTextDecode } from "https://esm.sh/png-chunk-text@1.0.0"; +export { default as createOpenApiClient } from "https://esm.sh/openapi-fetch@0.7.6"; diff --git a/tasks/generationQueue.ts b/tasks/generationQueue.ts new file mode 100644 index 0000000..d359518 --- /dev/null +++ b/tasks/generationQueue.ts @@ -0,0 +1,409 @@ +import { bot } from "../bot/mod.ts"; +import { PngInfo } from "../common/parsePngInfo.ts"; +import * as SdApi from "../common/sdApi.ts"; +import { formatUserChat } from "../common/formatUserChat.ts"; +import { getConfig, SdInstanceData } from "../db/config.ts"; +import { db } from "../db/db.ts"; +import { generationStore, SdGenerationInfo } from "../db/jobStore.ts"; +import { + Async, + AsyncX, + Base64, + createOpenApiClient, + FileType, + FmtDuration, + Grammy, + GrammyParseMode, + GrammyTypes, + KVMQ, + Log, +} from "../deps.ts"; +import { formatOrdinal } from "../common/formatOrdinal.ts"; +import { deadline } from "../common/deadline.ts"; +import { SdError } from "../common/SdError.ts"; + +const logger = () => Log.getLogger(); + +interface GenerationJob { + task: + | { + type: "txt2img"; + params: Partial; + } + | { + type: "img2img"; + params: Partial; + fileId: string; + }; + from: GrammyTypes.User; + chat: GrammyTypes.Chat; + requestMessage: GrammyTypes.Message; + replyMessage?: GrammyTypes.Message; + sdInstanceId?: string; + progress?: number; +} + +export const generationQueue = new KVMQ.Queue(db, "jobQueue"); + +export const activeGenerationWorkers = new Map>(); + +/** + * Periodically restarts stable diffusion generation workers if they become online. 
+ */ +export async function restartGenerationWorkers() { + while (true) { + const config = await getConfig(); + + for (const sdInstance of config.sdInstances) { + const activeWorker = activeGenerationWorkers.get(sdInstance.id); + if (activeWorker?.isProcessing) continue; + + const activeWorkerSdClient = createOpenApiClient({ + baseUrl: sdInstance.api.url, + headers: { "Authorization": sdInstance.api.auth }, + }); + + // check if worker is up + + const activeWorkerStatus = await activeWorkerSdClient.GET("/sdapi/v1/memory", { + signal: deadline(10_000), + }) + .then((response) => { + if (!response.data) { + throw new SdError("Failed to get worker status", response.response, response.error); + } + return response; + }) + .catch((error) => { + logger().warning(`Worker ${sdInstance.id} is down: ${error}`); + }); + + if (!activeWorkerStatus?.data) { + continue; + } + + const newWorker = generationQueue.createWorker(({ state, setState }) => + processGenerationJob(state, setState, sdInstance) + ); + + logger().info(`Started worker ${sdInstance.id}`); + + newWorker.processJobs(); + + newWorker.addEventListener("error", (e) => { + logger().error(`Job failed for ${formatUserChat(e.detail.job.state)}: ${e.detail.error}`); + bot.api.sendMessage( + e.detail.job.state.requestMessage.chat.id, + `Generating failed: ${e.detail.error}`, + { + reply_to_message_id: e.detail.job.state.requestMessage.message_id, + }, + ).catch(() => undefined); + // TODO: only stop worker if error is network error + newWorker.stopProcessing(); + }); + + activeGenerationWorkers.set(sdInstance.id, newWorker); + } + await Async.delay(60_000); + } +} + +async function processGenerationJob( + job: GenerationJob, + setJob: (state: GenerationJob) => Promise, + sdInstance: SdInstanceData, +) { + logger().debug(`Job started for ${formatUserChat(job)} using ${sdInstance.id}`); + const startDate = new Date(); + job.sdInstanceId = sdInstance.id; + await setJob(job); + + const config = await getConfig(); + const workerSdClient = createOpenApiClient({ + baseUrl: sdInstance.api.url, + headers: { "Authorization": sdInstance.api.auth }, + }); + + // if there is already a status message and its older than 30 seconds + if (job.replyMessage && (Date.now() - job.replyMessage.date * 1000) > 30_000) { + // try to delete it + await bot.api.deleteMessage(job.replyMessage.chat.id, job.replyMessage.message_id) + .catch(() => undefined); + job.replyMessage = undefined; + await setJob(job); + } + + await bot.api.sendChatAction(job.chat.id, "upload_photo", { maxAttempts: 1 }) + .catch(() => undefined); + + // if now there is no status message + if (!job.replyMessage) { + // send a new status message + job.replyMessage = await bot.api.sendMessage( + job.chat.id, + `Generating your prompt now... 0% using ${sdInstance.name}`, + { reply_to_message_id: job.requestMessage.message_id }, + ).catch((err) => { + // if the request message (the message we are replying to) was deleted + if (err instanceof Grammy.GrammyError && err.message.match(/repl(y|ied)/)) { + // set the status message to undefined + return undefined; + } + throw err; + }); + await setJob(job); + } else { + // edit the existing status message + await bot.api.editMessageText( + job.replyMessage.chat.id, + job.replyMessage.message_id, + `Generating your prompt now... 
0% using ${sdInstance.name}`, + { maxAttempts: 1 }, + ).catch(() => undefined); + } + + // if we don't have a status message (it failed sending because request was deleted) + if (!job.replyMessage) { + // cancel the job + logger().info(`Job cancelled for ${formatUserChat(job)}`); + return; + } + + // reduce size if worker can't handle the resolution + const size = limitSize( + { ...config.defaultParams, ...job.task.params }, + sdInstance.maxResolution, + ); + function limitSize( + { width, height }: { width?: number; height?: number }, + maxResolution: number, + ): { width?: number; height?: number } { + if (!width || !height) return {}; + const ratio = width / height; + if (width * height > maxResolution) { + return { + width: Math.trunc(Math.sqrt(maxResolution * ratio)), + height: Math.trunc(Math.sqrt(maxResolution / ratio)), + }; + } + return { width, height }; + } + + // start generating the image + const responsePromise = job.task.type === "txt2img" + ? workerSdClient.POST("/sdapi/v1/txt2img", { + body: { + ...config.defaultParams, + ...job.task.params, + ...size, + negative_prompt: job.task.params.negative_prompt + ? job.task.params.negative_prompt + : config.defaultParams?.negative_prompt, + }, + }) + : job.task.type === "img2img" + ? workerSdClient.POST("/sdapi/v1/img2img", { + body: { + ...config.defaultParams, + ...job.task.params, + ...size, + negative_prompt: job.task.params.negative_prompt + ? job.task.params.negative_prompt + : config.defaultParams?.negative_prompt, + init_images: [ + Base64.encode( + await fetch( + `https://api.telegram.org/file/bot${bot.token}/${await bot.api.getFile( + job.task.fileId, + ).then((file) => file.file_path)}`, + ).then((resp) => resp.arrayBuffer()), + ), + ], + }, + }) + : undefined; + + if (!responsePromise) { + throw new Error(`Unknown task type: ${job.task.type}`); + } + + // poll for progress while the generation request is pending + while (await AsyncX.promiseState(responsePromise) === "pending") { + await Async.delay(3000); + const progressResponse = await workerSdClient.GET("/sdapi/v1/progress", { + params: {}, + signal: deadline(15_000), + }); + if (!progressResponse.data) { + throw new SdError( + "Failed to get progress", + progressResponse.response, + progressResponse.error, + ); + } + job.progress = progressResponse.data.progress; + await setJob(job); + await bot.api.sendChatAction(job.chat.id, "upload_photo", { maxAttempts: 1 }) + .catch(() => undefined); + if (job.replyMessage) { + await bot.api.editMessageText( + job.replyMessage.chat.id, + job.replyMessage.message_id, + `Generating your prompt now... 
${ + (progressResponse.data.progress * 100).toFixed(0) + }% using ${sdInstance.name}`, + { maxAttempts: 1 }, + ).catch(() => undefined); + } + } + const response = await responsePromise; + + if (!response.data) { + throw new SdError("Generating image failed", response.response, response.error); + } + + if (!response.data.images?.length) { + throw new Error("No images returned from SD"); + } + + // info field is a json serialized string so we need to parse it + const info: SdGenerationInfo = JSON.parse(response.data.info); + + // change status message to uploading images + await bot.api.editMessageText( + job.replyMessage.chat.id, + job.replyMessage.message_id, + `Uploading your images...`, + { maxAttempts: 1 }, + ).catch(() => undefined); + + // render the caption + // const detailedReply = Object.keys(job.value.params).filter((key) => key !== "prompt").length > 0; + const detailedReply = true; + const jobDurationMs = Math.trunc((Date.now() - startDate.getTime()) / 1000) * 1000; + const { bold, fmt } = GrammyParseMode; + const caption = fmt([ + `${info.prompt}\n`, + ...detailedReply + ? [ + info.negative_prompt ? fmt`${bold("Negative prompt:")} ${info.negative_prompt}\n` : "", + fmt`${bold("Steps:")} ${info.steps}, `, + fmt`${bold("Sampler:")} ${info.sampler_name}, `, + fmt`${bold("CFG scale:")} ${info.cfg_scale}, `, + fmt`${bold("Seed:")} ${info.seed}, `, + fmt`${bold("Size")}: ${info.width}x${info.height}, `, + fmt`${bold("Worker")}: ${sdInstance.id}, `, + fmt`${bold("Time taken")}: ${FmtDuration.format(jobDurationMs, { ignoreZero: true })}`, + ] + : [], + ]); + + // sending images loop because telegram is unreliable and it would be a shame to lose the images + // TODO: separate queue for sending images + let sendMediaAttempt = 0; + let resultMessages: GrammyTypes.Message.MediaMessage[] | undefined; + while (true) { + sendMediaAttempt++; + await bot.api.sendChatAction(job.chat.id, "upload_photo", { maxAttempts: 1 }) + .catch(() => undefined); + + // parse files from reply JSON + const inputFiles = await Promise.all( + response.data.images.map(async (imageBase64, idx) => { + const imageBuffer = Base64.decode(imageBase64); + const imageType = await FileType.fileTypeFromBuffer(imageBuffer); + if (!imageType) throw new Error("Unknown file type returned from worker"); + return Grammy.InputMediaBuilder.photo( + new Grammy.InputFile(imageBuffer, `image${idx}.${imageType.ext}`), + // if it can fit, add caption for first photo + idx === 0 && caption.text.length <= 1024 + ? 
{ caption: caption.text, caption_entities: caption.entities } + : undefined, + ); + }), + ); + + // send the result to telegram + try { + resultMessages = await bot.api.sendMediaGroup(job.chat.id, inputFiles, { + reply_to_message_id: job.requestMessage.message_id, + maxAttempts: 5, + }); + break; + } catch (err) { + logger().warning( + `Sending images (attempt ${sendMediaAttempt}) for ${ + formatUserChat(job) + } using ${sdInstance.id} failed: ${err}`, + ); + if (sendMediaAttempt >= 6) throw err; + // wait 2 * 5 seconds before retrying + for (let i = 0; i < 2; i++) { + await bot.api.sendChatAction(job.chat.id, "upload_photo", { maxAttempts: 1 }) + .catch(() => undefined); + await Async.delay(5000); + } + } + } + + // send caption in separate message if it couldn't fit + if (caption.text.length > 1024 && caption.text.length <= 4096) { + await bot.api.sendMessage(job.chat.id, caption.text, { + reply_to_message_id: resultMessages[0].message_id, + entities: caption.entities, + }); + } + + // delete the status message + await bot.api.deleteMessage(job.replyMessage.chat.id, job.replyMessage.message_id) + .catch(() => undefined); + job.replyMessage = undefined; + await setJob(job); + + // save to generation storage + generationStore.create({ + task: { type: job.task.type, params: job.task.params }, + from: job.from, + chat: job.chat, + status: { + startDate, + endDate: new Date(), + info: info, + }, + }); + + logger().debug( + `Job finished for ${formatUserChat(job)} using ${sdInstance.id}${ + sendMediaAttempt > 1 ? ` after ${sendMediaAttempt} attempts` : "" + }`, + ); +} + +/** + * Updates the status message of all jobs in the queue. + */ +export async function handleGenerationUpdates() { + while (true) { + const jobs = await generationQueue.getAllJobs(); + let index = 0; + for (const job of jobs) { + if (job.lockUntil > new Date()) { + // job is currently being processed, the worker will update its status message + continue; + } + if (!job.state.replyMessage) { + // no status message, nothing to update + continue; + } + index++; + await bot.api.editMessageText( + job.state.replyMessage.chat.id, + job.state.replyMessage.message_id, + `You are ${formatOrdinal(index)} in queue.`, + { maxAttempts: 1 }, + ).catch(() => undefined); + } + await Async.delay(3000); + } +} diff --git a/tasks/mod.ts b/tasks/mod.ts index 1f26487..796de3d 100644 --- a/tasks/mod.ts +++ b/tasks/mod.ts @@ -1,13 +1,8 @@ -import { pingWorkers } from "./pingWorkers.ts"; -import { processJobs } from "./processJobs.ts"; -import { returnHangedJobs } from "./returnHangedJobs.ts"; -import { updateJobStatusMsgs } from "./updateJobStatusMsgs.ts"; +import { handleGenerationUpdates, restartGenerationWorkers } from "./generationQueue.ts"; export async function runAllTasks() { await Promise.all([ - processJobs(), - updateJobStatusMsgs(), - returnHangedJobs(), - pingWorkers(), + restartGenerationWorkers(), + handleGenerationUpdates(), ]); } diff --git a/tasks/pingWorkers.ts b/tasks/pingWorkers.ts deleted file mode 100644 index 4cfcd2d..0000000 --- a/tasks/pingWorkers.ts +++ /dev/null @@ -1,32 +0,0 @@ -import { Async, Log } from "../deps.ts"; -import { getGlobalSession } from "../bot/session.ts"; -import { sdGetConfig } from "../common/sdApi.ts"; - -const logger = () => Log.getLogger(); - -export const runningWorkers = new Set(); - -/** - * Periodically ping the workers to see if they are alive. 
- */ -export async function pingWorkers(): Promise { - while (true) { - try { - const config = await getGlobalSession(); - for (const worker of config.workers) { - const status = await sdGetConfig(worker.api).catch(() => null); - const wasRunning = runningWorkers.has(worker.id); - if (status) { - runningWorkers.add(worker.id); - if (!wasRunning) logger().info(`Worker ${worker.id} is online`); - } else { - runningWorkers.delete(worker.id); - if (wasRunning) logger().warning(`Worker ${worker.id} went offline`); - } - } - await Async.delay(60 * 1000); - } catch (err) { - logger().warning(`Pinging workers failed: ${err}`); - } - } -} diff --git a/tasks/processJobs.ts b/tasks/processJobs.ts deleted file mode 100644 index 2f7edd9..0000000 --- a/tasks/processJobs.ts +++ /dev/null @@ -1,369 +0,0 @@ -import { - Async, - Base64, - FileType, - FmtDuration, - Grammy, - GrammyParseMode, - GrammyTypes, - IKV, - Log, -} from "../deps.ts"; -import { bot } from "../bot/mod.ts"; -import { getGlobalSession, GlobalData, WorkerData } from "../bot/session.ts"; -import { fmt, formatUserChat } from "../common/utils.ts"; -import { - SdApiError, - sdImg2Img, - SdProgressResponse, - SdResponse, - sdTxt2Img, -} from "../common/sdApi.ts"; -import { JobSchema, jobStore } from "../db/jobStore.ts"; -import { runningWorkers } from "./pingWorkers.ts"; - -const logger = () => Log.getLogger(); - -/** - * Sends waiting jobs to workers. - */ -export async function processJobs(): Promise { - const busyWorkers = new Set(); - while (true) { - await new Promise((resolve) => setTimeout(resolve, 1000)); - - try { - const jobs = await jobStore.getBy("status.type", { value: "waiting" }); - // get first waiting job which hasn't errored in last minute - const job = jobs.find((job) => - job.value.status.type === "waiting" && - (job.value.status.lastErrorDate?.getTime() ?? 0) < Date.now() - 60_000 - ); - if (!job) continue; - - // find a worker to handle the job - const config = await getGlobalSession(); - const worker = config.workers?.find((worker) => - runningWorkers.has(worker.id) && - !busyWorkers.has(worker.id) - ); - if (!worker) continue; - - // process the job - await job.update((value) => ({ - ...value, - status: { - type: "processing", - progress: 0, - worker: worker.id, - updatedDate: new Date(), - message: job.value.status.type !== "done" ? 
job.value.status.message : undefined, - }, - })); - busyWorkers.add(worker.id); - processJob(job, worker, config) - .catch(async (err) => { - logger().error( - `Job failed for ${formatUserChat(job.value)} via ${worker.id}: ${err}`, - ); - if (job.value.status.type === "processing" && job.value.status.message) { - await bot.api.deleteMessage( - job.value.status.message.chat.id, - job.value.status.message.message_id, - ).catch(() => undefined); - } - if (err instanceof Grammy.GrammyError || err instanceof SdApiError) { - await bot.api.sendMessage( - job.value.chat.id, - `Failed to generate your prompt using ${worker.name}: ${err.message}`, - { reply_to_message_id: job.value.requestMessageId }, - ).catch(() => undefined); - await job.update({ status: { type: "waiting", lastErrorDate: new Date() } }) - .catch(() => undefined); - } - if ( - err instanceof SdApiError && - ( - err.statusCode === 0 /* Network error */ || - err.statusCode === 404 || - err.statusCode === 401 - ) - ) { - runningWorkers.delete(worker.id); - logger().warning( - `Worker ${worker.id} was marked as offline because of network error`, - ); - } - await job.delete().catch(() => undefined); - if (!(err instanceof Grammy.GrammyError) || err.error_code !== 403 /* blocked bot */) { - await jobStore.create(job.value); - } - }) - .finally(() => busyWorkers.delete(worker.id)); - } catch (err) { - logger().warning(`Processing jobs failed: ${err}`); - } - } -} - -async function processJob(job: IKV.Model, worker: WorkerData, config: GlobalData) { - logger().debug( - `Job started for ${formatUserChat(job.value)} using ${worker.id}`, - ); - const startDate = new Date(); - - // if there is already a status message and its older than 10 seconds - if ( - job.value.status.type === "processing" && job.value.status.message && - (Date.now() - job.value.status.message.date * 1000) > 10 * 1000 - ) { - // delete it - await bot.api.deleteMessage( - job.value.status.message.chat.id, - job.value.status.message.message_id, - ).catch(() => undefined); - await job.update((value) => ({ - ...value, - status: { ...value.status, message: undefined }, - })); - } - - // we have to check if job is still processing at every step because TypeScript - if (job.value.status.type === "processing") { - await bot.api.sendChatAction(job.value.chat.id, "upload_photo", { maxAttempts: 1 }) - .catch(() => undefined); - // if now there is no status message - if (!job.value.status.message) { - // send a new status message - const statusMessage = await bot.api.sendMessage( - job.value.chat.id, - `Generating your prompt now... 0% using ${worker.name}`, - { reply_to_message_id: job.value.requestMessageId }, - ).catch((err) => { - // if the request message (the message we are replying to) was deleted - if (err instanceof Grammy.GrammyError && err.message.match(/repl(y|ied)/)) { - // jest set the status message to undefined - return undefined; - } - throw err; - }); - await job.update((value) => ({ - ...value, - status: { ...value.status, message: statusMessage }, - })); - } else { - // edit the existing status message - await bot.api.editMessageText( - job.value.status.message.chat.id, - job.value.status.message.message_id, - `Generating your prompt now... 
0% using ${worker.name}`, - { maxAttempts: 1 }, - ).catch(() => undefined); - } - } - - // if we don't have a status message (it failed sending because request was deleted) - if (job.value.status.type === "processing" && !job.value.status.message) { - // cancel the job - await job.delete(); - logger().info(`Job cancelled for ${formatUserChat(job.value)}`); - return; - } - - // reduce size if worker can't handle the resolution - const size = limitSize( - { ...config.defaultParams, ...job.value.task.params }, - worker.maxResolution, - ); - - // process the job - const handleProgress = async (progress: SdProgressResponse) => { - // Important: don't let any errors escape this function - if (job.value.status.type === "processing" && job.value.status.message) { - await Promise.all([ - bot.api.sendChatAction(job.value.chat.id, "upload_photo", { maxAttempts: 1 }), - progress.progress > job.value.status.progress && bot.api.editMessageText( - job.value.status.message.chat.id, - job.value.status.message.message_id, - `Generating your prompt now... ${ - (progress.progress * 100).toFixed(0) - }% using ${worker.name}`, - { maxAttempts: 1 }, - ), - job.update((value) => ({ - ...value, - status: { - type: "processing", - progress: progress.progress, - worker: worker.id, - updatedDate: new Date(), - message: value.status.type !== "done" ? value.status.message : undefined, - }, - }), { maxAttempts: 1 }), - ]).catch((err) => - logger().warning( - `Updating job status for ${formatUserChat(job.value)} using ${worker.id} failed: ${err}`, - ) - ); - } - }; - let response: SdResponse; - const taskType = job.value.task.type; // don't narrow this to never pls typescript - switch (job.value.task.type) { - case "txt2img": - response = await sdTxt2Img( - worker.api, - { - ...config.defaultParams, - ...job.value.task.params, - ...size, - negative_prompt: job.value.task.params.negative_prompt - ? job.value.task.params.negative_prompt - : config.defaultParams?.negative_prompt, - }, - handleProgress, - ); - break; - case "img2img": { - const file = await bot.api.getFile(job.value.task.fileId); - const fileUrl = `https://api.telegram.org/file/bot${bot.token}/${file.file_path}`; - const fileBuffer = await fetch(fileUrl).then((resp) => resp.arrayBuffer()); - const fileBase64 = Base64.encode(fileBuffer); - response = await sdImg2Img( - worker.api, - { ...config.defaultParams, ...job.value.task.params, ...size, init_images: [fileBase64] }, - handleProgress, - ); - break; - } - default: - throw new Error(`Unknown task type: ${taskType}`); - } - - // change status message to uploading images - if (job.value.status.type === "processing" && job.value.status.message) { - await bot.api.editMessageText( - job.value.status.message.chat.id, - job.value.status.message.message_id, - `Uploading your images...`, - { maxAttempts: 1 }, - ).catch(() => undefined); - } - - // render the caption - // const detailedReply = Object.keys(job.value.params).filter((key) => key !== "prompt").length > 0; - const detailedReply = true; - const jobDurationMs = Math.trunc((Date.now() - startDate.getTime()) / 1000) * 1000; - const { bold } = GrammyParseMode; - const caption = fmt([ - `${response.info.prompt}\n`, - ...detailedReply - ? [ - response.info.negative_prompt - ? 
fmt`${bold("Negative prompt:")} ${response.info.negative_prompt}\n` - : "", - fmt`${bold("Steps:")} ${response.info.steps}, `, - fmt`${bold("Sampler:")} ${response.info.sampler_name}, `, - fmt`${bold("CFG scale:")} ${response.info.cfg_scale}, `, - fmt`${bold("Seed:")} ${response.info.seed}, `, - fmt`${bold("Size")}: ${response.info.width}x${response.info.height}, `, - fmt`${bold("Worker")}: ${worker.id}, `, - fmt`${bold("Time taken")}: ${FmtDuration.format(jobDurationMs, { ignoreZero: true })}`, - ] - : [], - ]); - - // sending images loop because telegram is unreliable and it would be a shame to lose the images - let sendMediaAttempt = 0; - let resultMessages: GrammyTypes.Message.MediaMessage[] | undefined; - while (true) { - sendMediaAttempt++; - await bot.api.sendChatAction(job.value.chat.id, "upload_photo", { maxAttempts: 1 }) - .catch(() => undefined); - - // parse files from reply JSON - const inputFiles = await Promise.all( - response.images.map(async (imageBase64, idx) => { - const imageBuffer = Base64.decode(imageBase64); - const imageType = await FileType.fileTypeFromBuffer(imageBuffer); - if (!imageType) throw new Error("Unknown file type returned from worker"); - return Grammy.InputMediaBuilder.photo( - new Grammy.InputFile(imageBuffer, `image${idx}.${imageType.ext}`), - // if it can fit, add caption for first photo - idx === 0 && caption.text.length <= 1024 - ? { caption: caption.text, caption_entities: caption.entities } - : undefined, - ); - }), - ); - - // send the result to telegram - try { - resultMessages = await bot.api.sendMediaGroup(job.value.chat.id, inputFiles, { - reply_to_message_id: job.value.requestMessageId, - maxAttempts: 5, - }); - break; - } catch (err) { - logger().warning( - `Sending images (attempt ${sendMediaAttempt}) for ${ - formatUserChat(job.value) - } using ${worker.id} failed: ${err}`, - ); - if (sendMediaAttempt >= 6) throw err; - // wait 2 * 5 seconds before retrying - for (let i = 0; i < 2; i++) { - await bot.api.sendChatAction(job.value.chat.id, "upload_photo", { maxAttempts: 1 }) - .catch(() => undefined); - await Async.delay(5000); - } - } - } - - // send caption in separate message if it couldn't fit - if (caption.text.length > 1024 && caption.text.length <= 4096) { - await bot.api.sendMessage(job.value.chat.id, caption.text, { - reply_to_message_id: resultMessages[0].message_id, - entities: caption.entities, - }); - } - - // delete the status message - if (job.value.status.type === "processing" && job.value.status.message) { - await bot.api.deleteMessage( - job.value.status.message.chat.id, - job.value.status.message.message_id, - ).catch(() => undefined); - await job.update((value) => ({ - ...value, - status: { ...value.status, message: undefined }, - })); - } - - // update job to status done - await job.update((value) => ({ - ...value, - status: { type: "done", info: response.info, startDate, endDate: new Date() }, - })); - - logger().debug( - `Job finished for ${formatUserChat(job.value)} using ${worker.id}${ - sendMediaAttempt > 1 ? 
` after ${sendMediaAttempt} attempts` : "" - }`, - ); -} - -function limitSize( - { width, height }: { width?: number; height?: number }, - maxResolution: number, -): { width?: number; height?: number } { - if (!width || !height) return {}; - const ratio = width / height; - if (width * height > maxResolution) { - return { - width: Math.trunc(Math.sqrt(maxResolution * ratio)), - height: Math.trunc(Math.sqrt(maxResolution / ratio)), - }; - } - return { width, height }; -} diff --git a/tasks/returnHangedJobs.ts b/tasks/returnHangedJobs.ts deleted file mode 100644 index 635204f..0000000 --- a/tasks/returnHangedJobs.ts +++ /dev/null @@ -1,40 +0,0 @@ -import { FmtDuration, Log } from "../deps.ts"; -import { formatUserChat } from "../common/utils.ts"; -import { jobStore } from "../db/jobStore.ts"; - -const logger = () => Log.getLogger(); - -/** - * Returns hanged jobs to the queue. - */ -export async function returnHangedJobs(): Promise { - while (true) { - try { - await new Promise((resolve) => setTimeout(resolve, 5000)); - const jobs = await jobStore.getBy("status.type", { value: "processing" }); - for (const job of jobs) { - if (job.value.status.type !== "processing") continue; - // if job wasn't updated for 2 minutes, return it to the queue - const timeSinceLastUpdateMs = Date.now() - job.value.status.updatedDate.getTime(); - if (timeSinceLastUpdateMs > 2 * 60 * 1000) { - await job.update((value) => ({ - ...value, - status: { - type: "waiting", - message: value.status.type !== "done" ? value.status.message : undefined, - }, - })); - logger().warning( - `Job for ${formatUserChat(job.value)} was returned to the queue because it hanged for ${ - FmtDuration.format(Math.trunc(timeSinceLastUpdateMs / 1000) * 1000, { - ignoreZero: true, - }) - }`, - ); - } - } - } catch (err) { - logger().warning(`Returning hanged jobs failed: ${err}`); - } - } -} diff --git a/tasks/updateJobStatusMsgs.ts b/tasks/updateJobStatusMsgs.ts deleted file mode 100644 index 2935acc..0000000 --- a/tasks/updateJobStatusMsgs.ts +++ /dev/null @@ -1,29 +0,0 @@ -import { Log } from "../deps.ts"; -import { bot } from "../bot/mod.ts"; -import { formatOrdinal } from "../common/utils.ts"; -import { jobStore } from "../db/jobStore.ts"; - -const logger = () => Log.getLogger(); - -/** - * Updates status messages for jobs in the queue. - */ -export async function updateJobStatusMsgs(): Promise { - while (true) { - try { - await new Promise((resolve) => setTimeout(resolve, 5000)); - const jobs = await jobStore.getBy("status.type", { value: "waiting" }); - for (const [index, job] of jobs.entries()) { - if (job.value.status.type !== "waiting" || !job.value.status.message) continue; - await bot.api.editMessageText( - job.value.status.message.chat.id, - job.value.status.message.message_id, - `You are ${formatOrdinal(index + 1)} in queue.`, - { maxAttempts: 1 }, - ).catch(() => undefined); - } - } catch (err) { - logger().warning(`Updating job status messages failed: ${err}`); - } - } -}