2023-02-15 05:29:23 +08:00
parent 8e581e3a56
commit 6da9e7e1bb
3 changed files with 177 additions and 164 deletions

.gitignore vendored (1 line changed)

@@ -6,3 +6,4 @@ node_modules
.output
.env
dist
outputs

server.py (339 lines changed)

@@ -210,179 +210,190 @@ def put_watermark(img, wm_encoder=None):
    img = Image.fromarray(img[:, :, ::-1])
    return img


import time
import requests


def main_dev(opt):
    model_name = ''  # default model
    model = None     # default model
    config = None    # default config
    device = None    # default device
    while True:
        time.sleep(2)  # sleep between polls to avoid high CPU usage
        data = requests.get("http://localhost:3000/api/drawing").json()  # fetch a batch of job parameters from the local network
        print(data)  # e.g. [{'model': '768-v-ema', 'prompt': 'a cat', 'watermark': '0'}, {'model': '768-v-ema', 'prompt': 'a dog', 'watermark': '0'}]
        # iterate over data; each item is a dict
        for item in data:
            print(item)  # e.g. {'model': '768-v-ema', 'prompt': 'a cat', 'watermark': '0'}
            # set the generation parameters
            if 'prompt' in item: opt.prompt = item['prompt']            # prompt text
            if 'n_samples' in item: opt.n_samples = item['n_samples']   # number of columns
            if 'n_rows' in item: opt.n_rows = item['n_rows']            # number of rows
            if 'scale' in item: opt.scale = item['scale']               # guidance scale
            # if the requested model differs, reload it (remember to free the old model's memory)
            if item['model'] != model_name:
                # get the environment configuration
                model_name = item['model']
                opt.config = f'/data/{model_name}.yaml'
                opt.ckpt = f'/data/{model_name}.ckpt'
                opt.device = 'cuda'
                print(f"config: {opt.config}", f"ckpt: {opt.ckpt}", f"device: {opt.device}")
                config = OmegaConf.load(f"{opt.config}")
                device = torch.device("cuda") if opt.device == "cuda" else torch.device("cpu")
                # load the model (into GPU memory)
                print(f"load model: {item['model']}..")
                model_name = item['model']
                model = load_model_from_config(config, f"{opt.ckpt}", device)
                print(f"model_name: {model_name}")
            # run inference for this batch of parameters with the specified model and config
            if opt.plms:
                sampler = PLMSSampler(model, device=device)
            elif opt.dpm:
                sampler = DPMSolverSampler(model, device=device)
            else:
                sampler = DDIMSampler(model, device=device)
            # make sure the output directory exists
            os.makedirs(opt.outdir, exist_ok=True)
            outpath = opt.outdir
            # create the watermark encoder
            wm = "SDV2"
            wm_encoder = WatermarkEncoder()
            wm_encoder.set_watermark('bytes', wm.encode('utf-8'))

            batch_size = opt.n_samples
            n_rows = opt.n_rows if opt.n_rows > 0 else batch_size
            if not opt.from_file:
                prompt = opt.prompt
                assert prompt is not None
                data = [batch_size * [prompt]]
            else:
                print(f"reading prompts from {opt.from_file}")
                with open(opt.from_file, "r") as f:
                    data = f.read().splitlines()
                    data = [p for p in data for i in range(opt.repeat)]
                    data = list(chunk(data, batch_size))

            sample_path = os.path.join(outpath, "samples")
            os.makedirs(sample_path, exist_ok=True)
            sample_count = 0
            base_count = len(os.listdir(sample_path))
            grid_count = len(os.listdir(outpath)) - 1

            start_code = None
            if opt.fixed_code:
                start_code = torch.randn([opt.n_samples, opt.C, opt.H // opt.f, opt.W // opt.f], device=device)

            if opt.torchscript or opt.ipex:
                transformer = model.cond_stage_model.model
                unet = model.model.diffusion_model
                decoder = model.first_stage_model.decoder
                additional_context = torch.cpu.amp.autocast() if opt.bf16 else nullcontext()
                shape = [opt.C, opt.H // opt.f, opt.W // opt.f]
                if opt.bf16 and not opt.torchscript and not opt.ipex:
                    raise ValueError('Bfloat16 is supported only for torchscript+ipex')
                if opt.bf16 and unet.dtype != torch.bfloat16:
                    raise ValueError("Use configs/stable-diffusion/intel/ configs with bf16 enabled if " +
                                     "you'd like to use bfloat16 with CPU.")
                if unet.dtype == torch.float16 and device == torch.device("cpu"):
                    raise ValueError("Use configs/stable-diffusion/intel/ configs for your model if you'd like to run it on CPU.")
                if opt.ipex:
                    import intel_extension_for_pytorch as ipex
                    bf16_dtype = torch.bfloat16 if opt.bf16 else None
                    transformer = transformer.to(memory_format=torch.channels_last)
                    transformer = ipex.optimize(transformer, level="O1", inplace=True)
                    unet = unet.to(memory_format=torch.channels_last)
                    unet = ipex.optimize(unet, level="O1", auto_kernel_selection=True, inplace=True, dtype=bf16_dtype)
                    decoder = decoder.to(memory_format=torch.channels_last)
                    decoder = ipex.optimize(decoder, level="O1", auto_kernel_selection=True, inplace=True, dtype=bf16_dtype)
                if opt.torchscript:
                    with torch.no_grad(), additional_context:
                        # get UNET scripted
                        if unet.use_checkpoint:
                            raise ValueError("Gradient checkpoint won't work with tracing. " +
                                             "Use configs/stable-diffusion/intel/ configs for your model or disable checkpoint in your config.")
                        img_in = torch.ones(2, 4, 96, 96, dtype=torch.float32)
                        t_in = torch.ones(2, dtype=torch.int64)
                        context = torch.ones(2, 77, 1024, dtype=torch.float32)
                        scripted_unet = torch.jit.trace(unet, (img_in, t_in, context))
                        scripted_unet = torch.jit.optimize_for_inference(scripted_unet)
                        print(type(scripted_unet))
                        model.model.scripted_diffusion_model = scripted_unet
                        # get Decoder for first stage model scripted
                        samples_ddim = torch.ones(1, 4, 96, 96, dtype=torch.float32)
                        scripted_decoder = torch.jit.trace(decoder, (samples_ddim))
                        scripted_decoder = torch.jit.optimize_for_inference(scripted_decoder)
                        print(type(scripted_decoder))
                        model.first_stage_model.decoder = scripted_decoder
                prompts = data[0]
                print("Running a forward pass to initialize optimizations")
                uc = None
                if opt.scale != 1.0:
                    uc = model.get_learned_conditioning(batch_size * [""])
                if isinstance(prompts, tuple):
                    prompts = list(prompts)
                with torch.no_grad(), additional_context:
                    for _ in range(3):
                        c = model.get_learned_conditioning(prompts)
                    samples_ddim, _ = sampler.sample(S=5,
                                                     conditioning=c,
                                                     batch_size=batch_size,
                                                     shape=shape,
                                                     verbose=False,
                                                     unconditional_guidance_scale=opt.scale,
                                                     unconditional_conditioning=uc,
                                                     eta=opt.ddim_eta,
                                                     x_T=start_code)
                    print("Running a forward pass for decoder")
                    for _ in range(3):
                        x_samples_ddim = model.decode_first_stage(samples_ddim)

            precision_scope = autocast if opt.precision == "autocast" or opt.bf16 else nullcontext
            with torch.no_grad(), \
                precision_scope(opt.device), \
                model.ema_scope():
                    all_samples = list()
                    for n in trange(opt.n_iter, desc="Sampling"):
                        for prompts in tqdm(data, desc="data"):
                            uc = None
                            if opt.scale != 1.0:
                                uc = model.get_learned_conditioning(batch_size * [""])
                            if isinstance(prompts, tuple):
                                prompts = list(prompts)
                            c = model.get_learned_conditioning(prompts)
                            shape = [opt.C, opt.H // opt.f, opt.W // opt.f]
                            samples, _ = sampler.sample(S=opt.steps,
                                                        conditioning=c,
                                                        batch_size=opt.n_samples,
                                                        shape=shape,
                                                        verbose=False,
                                                        unconditional_guidance_scale=opt.scale,
                                                        unconditional_conditioning=uc,
                                                        eta=opt.ddim_eta,
                                                        x_T=start_code)
                            x_samples = model.decode_first_stage(samples)
                            x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
                            for x_sample in x_samples:
                                x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
                                img = Image.fromarray(x_sample.astype(np.uint8))
                                img = put_watermark(img, wm_encoder)
                                img.save(os.path.join(sample_path, f"{base_count:05}.png"))
                                base_count += 1
                                sample_count += 1
                            all_samples.append(x_samples)
                    # additionally, save as grid
                    grid = torch.stack(all_samples, 0)
                    grid = rearrange(grid, 'n b c h w -> (n b) c h w')
                    grid = make_grid(grid, nrow=n_rows)
                    # to image
                    grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy()
                    grid = Image.fromarray(grid.astype(np.uint8))
                    grid = put_watermark(grid, wm_encoder)
                    grid.save(os.path.join(outpath, f'grid-{grid_count:04}.png'))
                    grid_count += 1
            print(f"Your samples are ready and waiting for you here: \n{outpath} \n"
                  f" \nEnjoy.")
            break


if __name__ == "__main__":
    opt = parse_args()
    main_dev(opt)
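
The reload branch in main_dev notes that memory should be freed when the requested checkpoint changes, but the code only rebinds model, so the previous weights can stay resident in VRAM until they happen to be garbage-collected. Below is a minimal sketch of how that branch could release the old checkpoint first; it would slot in right before the load_model_from_config call, assumes model and sampler are the only live references to the old weights, and the explicit gc.collect() / torch.cuda.empty_cache() calls are illustrative additions rather than part of this commit.

import gc  # would join the other imports added in this hunk

# sketch: inside `if item['model'] != model_name:`, before loading the new checkpoint
if model is not None:
    sampler = None                # the sampler built for the previous job also holds the model
    del model                     # drop the last Python reference to the old checkpoint
    gc.collect()                  # reclaim the released object graph
    if torch.cuda.is_available():
        torch.cuda.empty_cache()  # return cached VRAM blocks to the driver
model = load_model_from_config(config, f"{opt.ckpt}", device)

Without an explicit release of this kind, alternating between two large checkpoints can exceed the card's VRAM even though only one model is in use at a time.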

venv (symbolic link, 1 line changed)

@@ -0,0 +1 @@
/data/stablediffusion/venv