>>100159148
import torch
import numpy as np
from diffusers import StableDiffusionXLPipeline, TCDScheduler
from tgate import TgateSDXLLoader
from datetime import datetime
# Build the SDXL pipeline from a local single-file checkpoint, using a
# custom PAG (Perturbed-Attention Guidance) pipeline implementation.
pipe = StableDiffusionXLPipeline.from_single_file(
    "model here",  # TODO: point this at the actual checkpoint file
    custom_pipeline="./scripts/pag.py",
    torch_dtype=torch.float16,  # half precision to cut GPU memory use
    use_safetensors=True,
    safety_checker=None,  # NOTE(review): SDXL pipelines have no safety checker; this kwarg is likely ignored — verify
)
# TCD scheduler: enables good quality at the low step count used below (18 steps).
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
# OPTIMISATIONS
# T-GATE: caches/skips cross-attention after `gate_step` to speed up inference;
# gate_step and num_inference_steps here must match the values passed to pipe.tgate().
pipe = TgateSDXLLoader(pipe, gate_step=6, num_inference_steps=18)
# FreeU: re-weights UNet backbone/skip contributions to improve sample quality.
pipe.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
pipe.enable_vae_slicing()
pipe.enable_vae_tiling()
# Convert the UNet memory layout BEFORE enabling CPU offload: calling .to() on a
# submodule after enable_sequential_cpu_offload() can interfere with the
# accelerate offload hooks that the offload call installs on each module.
pipe.unet.to(memory_format=torch.channels_last)
pipe.enable_sequential_cpu_offload()
# PROMPT
prompt = "raw photo of "
negative_prompt = "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art)1.4, (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name)1.2"
# Draw a random seed. numpy's Generator.integers requires integer bounds —
# passing the float 1e6 (as the original did) raises a TypeError on the
# Generator API, unlike the legacy np.random.randint which casts silently.
rng = np.random.default_rng()
seed = int(rng.integers(1_000_000))
# EXECUTE PIPELINE
image = pipe.tgate(
    prompt,
    negative_prompt=negative_prompt,
    width=832,
    height=1216,
    eta=0.2,  # TCD stochasticity parameter (0 = deterministic)
    num_inference_steps=18,
    gate_step=6,  # must match the gate_step given to TgateSDXLLoader
    pag_scale=5.0,
    pag_applied_layers=['mid'],
    generator=torch.Generator(device="cuda").manual_seed(seed),
).images[0]
# Use a filename-safe timestamp: datetime.isoformat() emits ':' characters,
# which are invalid in Windows filenames.
timestamp = datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
image.save(f"./images/{timestamp}_{seed}_.jpg")
This is what I currently have for a simple text-to-image (t2i) pipeline.