birefnet update for HR model (#2629)

old model still available as an option
This commit is contained in:
DenOfEquity
2025-02-07 21:30:05 +00:00
committed by GitHub
parent 21c907ef87
commit e21a6e15b6

View File

@@ -2,7 +2,8 @@
import spaces import spaces
import os import os
import gradio as gr import gradio as gr
from gradio_imageslider import ImageSlider import gc
from loadimg import load_img from loadimg import load_img
from transformers import AutoModelForImageSegmentation from transformers import AutoModelForImageSegmentation
import torch import torch
@@ -13,38 +14,54 @@ import pathlib
from PIL import Image from PIL import Image
# Module-level state shared by the UI callbacks below.
# Both are (re)assigned by load_model() / common_setup() and cleared by unload().
transform_image = None
birefnet = None


def load_model(model):
    """Load (or hot-swap) the BiRefNet segmentation model.

    Any previously loaded model is dropped and CUDA cache is emptied first,
    so switching models does not double peak VRAM usage.

    Args:
        model: HuggingFace repo id, e.g. "ZhengPeng7/BiRefNet" or
            "ZhengPeng7/BiRefNet_HR".
    """
    global birefnet
    # Release the old model before downloading/instantiating the new one.
    birefnet = None
    gc.collect()
    torch.cuda.empty_cache()
    birefnet = AutoModelForImageSegmentation.from_pretrained(
        model, trust_remote_code=True
    )
    birefnet.eval()
    # fp16 weights; inference inputs are cast to float16 to match.
    birefnet.half()
    spaces.automatically_move_to_gpu_when_forward(birefnet)


# Load the HR model by default while capturing the GPU handle used by the
# @spaces.GPU decorators below.
with spaces.capture_gpu_object() as birefnet_gpu_obj:
    load_model("ZhengPeng7/BiRefNet_HR")
def common_setup(size):
    """Rebuild the preprocessing pipeline for the requested square size.

    Args:
        size: side length in pixels (1024 suits the old model; 2048 the HR
            model — see the "options" tab).
    """
    global transform_image
    transform_image = transforms.Compose(
        [
            transforms.Resize((size, size)),
            transforms.ToTensor(),
            # Standard ImageNet mean/std normalization.
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]
    )
@spaces.GPU(gpu_objects=[birefnet_gpu_obj], manual_load=True)
def process(image):
    """Remove the background from a single image.

    Args:
        image: anything load_img accepts (PIL image, URL, or local path).

    Returns:
        The RGB input with the predicted foreground mask applied as an
        alpha channel.
    """
    im = load_img(image, output_type="pil")
    im = im.convert("RGB")
    image_size = im.size
    image = load_img(im)
    # Cast to float16 to match the half-precision model weights.
    input_image = transform_image(image).unsqueeze(0).to(spaces.gpu).to(torch.float16)
    # Prediction
    with torch.no_grad():
        preds = birefnet(input_image)[-1].sigmoid().cpu()
    pred = preds[0].squeeze()
    pred_pil = transforms.ToPILImage()(pred)
    # Resize the mask back to the original image dimensions.
    mask = pred_pil.resize(image_size)
    image.putalpha(mask)
    return image
@spaces.GPU(gpu_objects=[birefnet_gpu_obj], manual_load=True) @spaces.GPU(gpu_objects=[birefnet_gpu_obj], manual_load=True)
def batch_process(input_folder, output_folder, save_png, save_flat): def batch_process(input_folder, output_folder, save_png, save_flat):
@@ -52,7 +69,7 @@ def batch_process(input_folder, output_folder, save_png, save_flat):
os.makedirs(output_folder, exist_ok=True) os.makedirs(output_folder, exist_ok=True)
# Supported image extensions # Supported image extensions
image_extensions = ['.jpg', '.jpeg', '.png', '.bmp', '.webp'] image_extensions = ['.jpg', '.jpeg', '.png', '.bmp', '.webp', ".avif"]
# Collect all image files from input folder # Collect all image files from input folder
input_images = [] input_images = []
@@ -70,8 +87,8 @@ def batch_process(input_folder, output_folder, save_png, save_flat):
image = load_img(im) image = load_img(im)
# Prepare image for processing # Prepare image for processing
input_image = transform_image(image).unsqueeze(0).to(spaces.gpu) input_image = transform_image(image).unsqueeze(0).to(spaces.gpu).to(torch.float16)
# Prediction # Prediction
with torch.no_grad(): with torch.no_grad():
preds = birefnet(input_image)[-1].sigmoid().cpu() preds = birefnet(input_image)[-1].sigmoid().cpu()
@@ -105,41 +122,68 @@ def batch_process(input_folder, output_folder, save_png, save_flat):
return processed_images return processed_images
slider1 = ImageSlider(label="birefnet", type="pil")
def unload():
    """Drop the model and transform and free CUDA cache.

    Registered via demo.unload() so resources are released when the Gradio
    session ends.
    """
    global birefnet, transform_image
    birefnet = None
    transform_image = None
    gc.collect()
    torch.cuda.empty_cache()
# Hide the footer and cap the app width.
css = """
.gradio-container {
    max-width: 1280px !important;
}
footer {
    display: none !important;
}
"""

with gr.Blocks(css=css, analytics_enabled=False) as demo:
    gr.Markdown("# birefnet for background removal")

    with gr.Tab("image"):
        with gr.Row():
            with gr.Column():
                image = gr.Image(label="Upload an image", type='pil', height=616)
                go_image = gr.Button("Remove background")
            with gr.Column():
                result1 = gr.Image(label="birefnet", type="pil", height=576)

    with gr.Tab("URL"):
        with gr.Row():
            with gr.Column():
                text = gr.Textbox(label="URL to image, or local path to image", max_lines=1)
                go_text = gr.Button("Remove background")
            with gr.Column():
                result2 = gr.Image(label="birefnet", type="pil", height=576)

    with gr.Tab("batch"):
        with gr.Row():
            with gr.Column():
                input_dir = gr.Textbox(label="Input folder path", max_lines=1)
                output_dir = gr.Textbox(label="Output folder path (will overwrite)", max_lines=1)
                always_png = gr.Checkbox(label="Always save as PNG", value=True)
                save_flat = gr.Checkbox(label="Save flat (no mask)", value=False)
                go_batch = gr.Button("Remove background(s)")
            with gr.Column():
                result3 = gr.File(label="Processed image(s)", type="filepath", file_count="multiple")

    with gr.Tab("options"):
        model = gr.Dropdown(label="Model",
            choices=["ZhengPeng7/BiRefNet", "ZhengPeng7/BiRefNet_HR"], value="ZhengPeng7/BiRefNet_HR", type="value")
        proc_size = gr.Dropdown(label="birefnet processing image size", info="1024: old model; 2048: HR model - more accurate, uses more VRAM (shared memory works well)",
            choices=[1024, 1536, 2048], value=2048)

    # Event wiring (inside the Blocks context so listeners bind to `demo`).
    # Each run rebuilds the transform at the currently selected size first,
    # then performs the actual inference step.
    model.change(fn=load_model, inputs=model, outputs=None)
    go_image.click(fn=common_setup, inputs=[proc_size]).then(fn=process, inputs=image, outputs=result1)
    go_text.click(fn=common_setup, inputs=[proc_size]).then(fn=process, inputs=text, outputs=result2)
    go_batch.click(fn=common_setup, inputs=[proc_size]).then(fn=batch_process, inputs=[input_dir, output_dir, always_png, save_flat], outputs=result3)
    demo.unload(unload)
# Script entry point: launch the Gradio app and open it in the browser.
if __name__ == "__main__":
    demo.launch(inbrowser=True)