power88
2024-05-26 18:41:24 +08:00
commit af627f86bc
9 changed files with 151268 additions and 0 deletions

install.py (new file, 23 lines)

@@ -0,0 +1,23 @@
import os
import pathlib
import shutil

from huggingface_hub import hf_hub_download
from modules.scripts import basedir

ext_dir = basedir()
fooocus_expansion_path = pathlib.Path(ext_dir) / "models" / "prompt_expansion"
base_model_path = pathlib.Path(ext_dir) / "extensions" / "webui-fooocus-prompt-expansion" / "models"

if not os.path.exists(os.path.join(fooocus_expansion_path, 'pytorch_model.bin')):
    try:
        print('### webui-fooocus-prompt-expansion: Downloading model...')
        # Copy the bundled tokenizer/config files, then fetch the GPT-2 checkpoint from Hugging Face.
        shutil.copytree(base_model_path, fooocus_expansion_path, dirs_exist_ok=True)
        hf_hub_download(repo_id='lllyasviel/misc',
                        filename='fooocus_expansion.bin',
                        local_dir=str(fooocus_expansion_path),
                        resume_download=True,
                        local_dir_use_symlinks=False)
        # Rename the checkpoint so transformers picks it up as pytorch_model.bin.
        os.replace(os.path.join(fooocus_expansion_path, 'fooocus_expansion.bin'),
                   os.path.join(fooocus_expansion_path, 'pytorch_model.bin'))
    except Exception as e:
        print('### webui-fooocus-prompt-expansion: Failed to download model...')
        print(e)
        print(f'### webui-fooocus-prompt-expansion: To enable this extension, please download the model manually from "https://huggingface.co/lllyasviel/misc/tree/main/fooocus_expansion.bin" and place it in {fooocus_expansion_path}.')
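Note: a quick way to sanity-check the install step is to load the target directory with transformers. This is a minimal sketch, not part of the extension; the relative path assumes the webui root as the working directory.

# Sanity check: load the downloaded expansion model and tokenizer.
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained('models/prompt_expansion')
model = AutoModelForCausalLM.from_pretrained('models/prompt_expansion')
print(f'Loaded expansion model with {model.num_parameters():,} parameters.')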

models/config.json (new file, 40 lines)

@@ -0,0 +1,40 @@
{
  "_name_or_path": "gpt2",
  "activation_function": "gelu_new",
  "architectures": [
    "GPT2LMHeadModel"
  ],
  "attn_pdrop": 0.1,
  "bos_token_id": 50256,
  "embd_pdrop": 0.1,
  "eos_token_id": 50256,
  "pad_token_id": 50256,
  "initializer_range": 0.02,
  "layer_norm_epsilon": 1e-05,
  "model_type": "gpt2",
  "n_ctx": 1024,
  "n_embd": 768,
  "n_head": 12,
  "n_inner": null,
  "n_layer": 12,
  "n_positions": 1024,
  "reorder_and_upcast_attn": false,
  "resid_pdrop": 0.1,
  "scale_attn_by_inverse_layer_idx": false,
  "scale_attn_weights": true,
  "summary_activation": null,
  "summary_first_dropout": 0.1,
  "summary_proj_to_labels": true,
  "summary_type": "cls_index",
  "summary_use_proj": true,
  "task_specific_params": {
    "text-generation": {
      "do_sample": true,
      "max_length": 50
    }
  },
  "torch_dtype": "float32",
  "transformers_version": "4.23.0.dev0",
  "use_cache": true,
  "vocab_size": 50257
}
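This is the stock GPT-2 small architecture (12 layers, 12 heads, 768-dim embeddings, 50257-token BPE vocabulary); the fine-tuning lives entirely in the downloaded weights. As a back-of-envelope check that these numbers really give the familiar ~124M parameter count (a sketch, not part of the repo):

# Approximate GPT-2 small parameter count from the config values above.
vocab_size, n_positions, n_embd, n_layer = 50257, 1024, 768, 12

embeddings = vocab_size * n_embd + n_positions * n_embd  # wte + wpe
per_block = 12 * n_embd * n_embd + 13 * n_embd           # attn (4*d^2) + mlp (8*d^2) + biases + layernorms
total = embeddings + n_layer * per_block + 2 * n_embd    # + final layernorm

print(f'{total:,} parameters')  # 124,439,808 for GPT-2 small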

models/merges.txt (new file, 50001 lines)

File diff suppressed because it is too large.
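merges.txt holds the 50,000 GPT-2 byte-pair merge rules (one pair per line, after a version header); together with vocab.json it defines the tokenizer's BPE. A quick way to inspect it (a sketch, assuming the file layout above):

# Peek at the header and the highest-priority merge rules.
with open('models/merges.txt', encoding='utf-8') as f:
    for line in f.readlines()[:5]:
        print(line.rstrip())
# '#version: 0.2' followed by pairs such as 'Ġ t' and 'Ġ a'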

models/positive.txt (new file, 642 lines)

@@ -0,0 +1,642 @@
abundant
accelerated
accepted
accepting
acclaimed
accomplished
acknowledged
activated
adapted
adjusted
admirable
adorable
adorned
advanced
adventurous
advocated
aesthetic
affirmed
affluent
agile
aimed
aligned
alive
altered
amazing
ambient
amplified
analytical
animated
appealing
applauded
appreciated
ardent
aromatic
arranged
arresting
articulate
artistic
associated
assured
astonishing
astounding
atmosphere
attempted
attentive
attractive
authentic
authoritative
awarded
awesome
backed
background
baked
balance
balanced
balancing
beaten
beautiful
beloved
beneficial
benevolent
best
bestowed
blazing
blended
blessed
boosted
borne
brave
breathtaking
brewed
bright
brilliant
brought
built
burning
calm
calmed
candid
caring
carried
catchy
celebrated
celestial
certain
championed
changed
charismatic
charming
chased
cheered
cheerful
cherished
chic
chosen
cinematic
clad
classic
classy
clear
coached
coherent
collected
color
colorful
colors
colossal
combined
comforting
commanding
committed
compassionate
compatible
complete
complex
complimentary
composed
composition
comprehensive
conceived
conferred
confident
connected
considerable
considered
consistent
conspicuous
constructed
constructive
contemplated
contemporary
content
contrasted
conveyed
cooked
cool
coordinated
coupled
courageous
coveted
cozy
created
creative
credited
crisp
critical
cultivated
cured
curious
current
customized
cute
daring
darling
dazzling
decorated
decorative
dedicated
deep
defended
definitive
delicate
delightful
delivered
depicted
designed
desirable
desired
destined
detail
detailed
determined
developed
devoted
devout
diligent
direct
directed
discovered
dispatched
displayed
distilled
distinct
distinctive
distinguished
diverse
divine
dramatic
draped
dreamed
driven
dynamic
earnest
eased
ecstatic
educated
effective
elaborate
elegant
elevated
elite
eminent
emotional
empowered
empowering
enchanted
encouraged
endorsed
endowed
enduring
energetic
engaging
enhanced
enigmatic
enlightened
enormous
enticing
envisioned
epic
esteemed
eternal
everlasting
evolved
exalted
examining
excellent
exceptional
exciting
exclusive
exemplary
exotic
expansive
exposed
expressive
exquisite
extended
extraordinary
extremely
fabulous
facilitated
fair
faithful
famous
fancy
fantastic
fascinating
fashionable
fashioned
favorable
favored
fearless
fermented
fertile
festive
fiery
fine
finest
firm
fixed
flaming
flashing
flashy
flavored
flawless
flourishing
flowing
focus
focused
formal
formed
fortunate
fostering
frank
fresh
fried
friendly
fruitful
fulfilled
full
futuristic
generous
gentle
genuine
gifted
gigantic
glamorous
glorious
glossy
glowing
gorgeous
graceful
gracious
grand
granted
grateful
great
grilled
grounded
grown
guarded
guided
hailed
handsome
healing
healthy
heartfelt
heavenly
heroic
highly
historic
holistic
holy
honest
honored
hoped
hopeful
iconic
ideal
illuminated
illuminating
illumination
illustrious
imaginative
imagined
immense
immortal
imposing
impressive
improved
incredible
infinite
informed
ingenious
innocent
innovative
insightful
inspirational
inspired
inspiring
instructed
integrated
intense
intricate
intriguing
invaluable
invented
investigative
invincible
inviting
irresistible
joined
joyful
keen
kindly
kinetic
knockout
laced
lasting
lauded
lavish
legendary
lifted
light
limited
linked
lively
located
logical
loved
lovely
loving
loyal
lucid
lucky
lush
luxurious
luxury
magic
magical
magnificent
majestic
marked
marvelous
massive
matched
matured
meaningful
memorable
merged
merry
meticulous
mindful
miraculous
modern
modified
monstrous
monumental
motivated
motivational
moved
moving
mystical
mythical
naive
neat
new
nice
nifty
noble
notable
noteworthy
novel
nuanced
offered
open
optimal
optimistic
orderly
organized
original
originated
outstanding
overwhelming
paired
palpable
passionate
peaceful
perfect
perfected
perpetual
persistent
phenomenal
pious
pivotal
placed
planned
pleasant
pleased
pleasing
plentiful
plotted
plush
poetic
poignant
polished
positive
praised
precious
precise
premier
premium
presented
preserved
prestigious
pretty
priceless
prime
pristine
probing
productive
professional
profound
progressed
progressive
prominent
promoted
pronounced
propelled
proportional
prosperous
protected
provided
provocative
pure
pursued
pushed
quaint
quality
questioning
quiet
radiant
rare
rational
real
reborn
reclaimed
recognized
recovered
refined
reflected
refreshed
refreshing
related
relaxed
relentless
reliable
relieved
remarkable
renewed
renowned
representative
rescued
resilient
respected
respectful
restored
retrieved
revealed
revealing
revered
revived
rewarded
rich
roasted
robust
romantic
royal
sacred
salient
satisfied
satisfying
saturated
saved
scenic
scientific
select
sensational
serious
set
shaped
sharp
shielded
shining
shiny
shown
significant
silent
sincere
singular
situated
sleek
slick
smart
snug
solemn
solid
soothing
sophisticated
sought
sparkling
special
spectacular
sped
spirited
spiritual
splendid
spread
stable
steady
still
stimulated
stimulating
stirred
straightforward
striking
strong
structured
stunning
sturdy
stylish
sublime
successful
sunny
superb
superior
supplied
supported
supportive
supreme
sure
surreal
sweet
symbolic
symmetry
synchronized
systematic
tailored
taking
targeted
taught
tempting
tender
terrific
thankful
theatrical
thought
thoughtful
thrilled
thrilling
thriving
tidy
timeless
touching
tough
trained
tranquil
transformed
translucent
transparent
transported
tremendous
trendy
tried
trim
true
trustworthy
unbelievable
unconditional
uncovered
unified
unique
united
universal
unmatched
unparalleled
upheld
valiant
valued
varied
very
vibrant
virtuous
vivid
warm
wealthy
whole
winning
wished
witty
wonderful
worshipped
worthy
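These 642 words are the only continuations the expansion model is allowed to sample; in scripts/expansion.py below, every other vocabulary entry receives a large negative logit bias. GPT-2's byte-level BPE marks a leading space with 'Ġ', so each word is matched against the vocab with that prefix. A minimal sketch of the mapping, assuming the tokenizer files above sit in models/:

# Collect the token ids whitelisted by positive.txt (mirrors FooocusExpansion.__init__).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('models')
words = open('models/positive.txt', encoding='utf-8').read().splitlines()
allowed = ['Ġ' + w.lower() for w in words if w != '']  # 'Ġ' = leading space in GPT-2 BPE

allowed_ids = [v for k, v in tokenizer.vocab.items() if k in allowed]
print(f'{len(allowed_ids)} of {len(tokenizer.vocab)} tokens remain unbiased.')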

models/special_tokens_map.json (new file, 5 lines)

@@ -0,0 +1,5 @@
{
  "bos_token": "<|endoftext|>",
  "eos_token": "<|endoftext|>",
  "unk_token": "<|endoftext|>"
}

models/tokenizer.json (new file, 100304 lines)

File diff suppressed because it is too large.

models/tokenizer_config.json (new file, 10 lines)

@@ -0,0 +1,10 @@
{
  "add_prefix_space": false,
  "bos_token": "<|endoftext|>",
  "eos_token": "<|endoftext|>",
  "model_max_length": 1024,
  "name_or_path": "gpt2",
  "special_tokens_map_file": null,
  "tokenizer_class": "GPT2Tokenizer",
  "unk_token": "<|endoftext|>"
}

models/vocab.json (new file, 1 line)

File diff suppressed because one or more lines are too long

scripts/expansion.py (new file, 242 lines)

@@ -0,0 +1,242 @@
# Fooocus GPT2 Expansion
# Algorithm created by Lvmin Zhang at 2023, Stanford
# Modified by PlayDystinDB and GPT-4O for stable-diffusion-webui
# If used inside Fooocus, any use is permitted.
# If used outside Fooocus, only non-commercial use is permitted (CC-BY-NC 4.0).
# This applies to the word list, vocab, model, and algorithm.
import math
import os

import gradio as gr
import psutil
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
from transformers.generation.logits_process import LogitsProcessorList

from modules import scripts
from modules.scripts import basedir

# DirectML is not detected in this port; the flag exists so get_free_memory()
# keeps the structure of the original Fooocus implementation.
directml_enabled = False


def text_encoder_device():
    if torch.cuda.is_available():
        return torch.device(torch.cuda.current_device())
    return torch.device("cpu")


def text_encoder_offload_device():
    # Kept separate from text_encoder_device() to mirror Fooocus, even though
    # both currently resolve to the same device.
    if torch.cuda.is_available():
        return torch.device(torch.cuda.current_device())
    return torch.device("cpu")
def get_free_memory(dev=None, torch_free_too=False):
    global directml_enabled
    if dev is None:
        dev = text_encoder_device()

    if hasattr(dev, 'type') and (dev.type == 'cpu' or dev.type == 'mps'):
        mem_free_total = psutil.virtual_memory().available
        mem_free_torch = mem_free_total
    else:
        if directml_enabled:
            mem_free_total = 1024 * 1024 * 1024  # TODO
            mem_free_torch = mem_free_total
        else:
            stats = torch.cuda.memory_stats(dev)
            mem_active = stats['active_bytes.all.current']
            mem_reserved = stats['reserved_bytes.all.current']
            mem_free_cuda, _ = torch.cuda.mem_get_info(dev)
            mem_free_torch = mem_reserved - mem_active
            mem_free_total = mem_free_cuda + mem_free_torch

    if torch_free_too:
        return (mem_free_total, mem_free_torch)
    return mem_free_total


# limitation of np.random.seed(), called from transformers.set_seed()
SEED_LIMIT_NUMPY = 2 ** 32
neg_inf = -8192.0

ext_dir = basedir()
path_fooocus_expansion = os.path.join('.', "models", "prompt_expansion")
def safe_str(x):
    x = str(x)
    for _ in range(16):
        x = x.replace('  ', ' ')  # collapse runs of spaces
    return x.strip(",. \r\n")


def remove_pattern(x, pattern):
    for p in pattern:
        x = x.replace(p, '')
    return x
def should_use_fp16(device=None, model_params=0, prioritize_performance=True):
    # CPU devices never use fp16 (MPS is redirected to CPU before this is called).
    if device is not None and hasattr(device, 'type'):
        if device.type == 'cpu':
            return False

    if torch.cuda.is_bf16_supported():
        return True

    props = torch.cuda.get_device_properties("cuda")
    if props.major < 6:
        return False

    fp16_works = False
    # FP16 is confirmed working on a 1080 (GP104) but it's a bit slower than FP32,
    # so it should only be enabled when the model doesn't actually fit on the card.
    # TODO: actually test if GP106 and others have the same type of behavior
    nvidia_10_series = ["1080", "1070", "titan x", "p3000", "p3200", "p4000", "p4200",
                        "p5000", "p5200", "p6000", "1060", "1050"]
    for x in nvidia_10_series:
        if x in props.name.lower():
            fp16_works = True

    if fp16_works:
        free_model_memory = (get_free_memory() * 0.9 - (1024 * 1024 * 1024))
        if (not prioritize_performance) or model_params * 4 > free_model_memory:
            return True

    if props.major < 7:
        return False

    # FP16 is just broken on these cards
    nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600", "MX550",
                        "MX450", "CMP 30HX", "T2000", "T1000", "T1200"]
    for x in nvidia_16_series:
        if x in props.name:
            return False

    return True


def is_device_mps(device):
    return hasattr(device, 'type') and device.type == 'mps'
class FooocusExpansion:
    def __init__(self):
        global load_model_device
        print(f'Loading models from {path_fooocus_expansion}')
        self.tokenizer = AutoTokenizer.from_pretrained(path_fooocus_expansion)

        with open(os.path.join(path_fooocus_expansion, 'positive.txt'), encoding='utf-8') as f:
            positive_words = f.read().splitlines()
        # 'Ġ' marks a leading space in GPT-2's byte-level BPE vocab.
        positive_words = ['Ġ' + x.lower() for x in positive_words if x != '']

        # Start from -inf everywhere, then zero out the whitelisted words.
        self.logits_bias = torch.zeros((1, len(self.tokenizer.vocab)), dtype=torch.float32) + neg_inf
        debug_list = []
        for k, v in self.tokenizer.vocab.items():
            if k in positive_words:
                self.logits_bias[0, v] = 0
                debug_list.append(k[1:])
        print(f'Fooocus V2 Expansion: Vocab with {len(debug_list)} words.')

        self.model = AutoModelForCausalLM.from_pretrained(path_fooocus_expansion)
        self.model.eval()

        load_model_device = text_encoder_device()
        offload_device = text_encoder_offload_device()

        # MPS hack: run the expansion model on CPU on Apple silicon.
        if is_device_mps(load_model_device):
            load_model_device = torch.device('cpu')
            offload_device = torch.device('cpu')

        use_fp16 = should_use_fp16(device=load_model_device)
        if use_fp16:
            self.model.half()
        self.model.to(load_model_device)  # ensure the model is on the target device
        print(f'Fooocus Expansion engine loaded for {load_model_device}, use_fp16 = {use_fp16}.')
    @torch.no_grad()
    @torch.inference_mode()
    def logits_processor(self, input_ids, scores):
        assert scores.ndim == 2 and scores.shape[0] == 1
        self.logits_bias = self.logits_bias.to(load_model_device)

        bias = self.logits_bias.clone().to(load_model_device)
        # Forbid repeating tokens that already appear in the prompt.
        bias[0, input_ids[0].to(load_model_device).long()] = neg_inf
        bias[0, 11] = 0  # token 11 is ',' in the GPT-2 vocab; always allowed

        return scores + bias.to(scores.device)  # keep bias on the same device as scores

    @torch.no_grad()
    @torch.inference_mode()
    def __call__(self, prompt, seed):
        if prompt == '':
            return ''

        seed = int(seed) % SEED_LIMIT_NUMPY
        set_seed(seed)
        prompt = safe_str(prompt) + ','

        tokenized_kwargs = self.tokenizer(prompt, return_tensors="pt")
        tokenized_kwargs.data['input_ids'] = tokenized_kwargs.data['input_ids'].to(load_model_device)
        tokenized_kwargs.data['attention_mask'] = tokenized_kwargs.data['attention_mask'].to(load_model_device)

        # Generate up to the next multiple of 75 tokens, matching the webui's
        # CLIP chunk size, so the expansion fills out the current chunk.
        current_token_length = int(tokenized_kwargs.data['input_ids'].shape[1])
        max_token_length = 75 * int(math.ceil(float(current_token_length) / 75.0))
        max_new_tokens = max_token_length - current_token_length

        features = self.model.generate(**tokenized_kwargs,
                                       top_k=100,
                                       max_new_tokens=max_new_tokens,
                                       do_sample=True,
                                       logits_processor=LogitsProcessorList([self.logits_processor]))

        response = self.tokenizer.batch_decode(features, skip_special_tokens=True)
        return safe_str(response[0])
expansion_instance = None


def createPositive(positive, seed):
    global expansion_instance
    try:
        # Load the expansion model once and reuse it for subsequent prompts.
        if expansion_instance is None:
            expansion_instance = FooocusExpansion()
        return expansion_instance(positive, seed=seed)
    except Exception as e:
        print(f"An error occurred: {str(e)}")
        return positive  # fall back to the unmodified prompt
class FooocusPromptExpansion(scripts.Script):
    def __init__(self) -> None:
        super().__init__()

    def title(self):
        return 'Fooocus Expansion'

    def show(self, is_img2img):
        return scripts.AlwaysVisible

    def ui(self, is_img2img):
        with gr.Group():
            with gr.Accordion("Fooocus Expansion", open=True):
                is_enabled = gr.Checkbox(
                    value=True, label="Enable Expansion", info="Enable or disable prompt expansion")
                seed = gr.Number(
                    value=0, maximum=63, label="Seed", info="Seed for the random number generator")
        return [is_enabled, seed]

    def process(self, p, is_enabled, seed):
        if not is_enabled:
            return
        for i, prompt in enumerate(p.all_prompts):
            p.all_prompts[i] = createPositive(prompt, seed)

    def after_component(self, component, **kwargs):
        # Helpful reference:
        # https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/7456#issuecomment-1414465888
        if kwargs.get("elem_id") == "txt2img_prompt":  # positive prompt textbox (txt2img)
            self.boxx = component
        if kwargs.get("elem_id") == "img2img_prompt":  # positive prompt textbox (img2img)
            self.boxxIMG = component
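For reference, the expansion can also be exercised directly inside the webui's Python environment; a minimal sketch (the call path exists in the code above, but the exact output here is illustrative, varying with the seed):

# Hypothetical standalone use of the expansion entry point.
from scripts.expansion import createPositive

expanded = createPositive('a photo of a cat', seed=42)
print(expanded)
# e.g. 'a photo of a cat, cinematic, dramatic, highly detailed, sharp focus, ...'
# Only words from models/positive.txt can be appended.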