Joe Booth committed
Commit 8498cb9 · 1 Parent(s): 5bbcb1c

clean up info text
app.py CHANGED
@@ -73,10 +73,7 @@ def base64_to_embedding(embeddings_b64):
 def main(
 #    input_im,
     embeddings,
-    scale=3.0,
     n_samples=4,
-    steps=25,
-    seed=None
 ):
 
     embeddings = base64_to_embedding(embeddings)
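For context, `embeddings` arrives as a base64 string held in a `gr.Textbox`, and `base64_to_embedding` (defined just above this hunk) decodes it back into a vector. The app's exact encoding is not shown here; a minimal sketch of such a round-trip, assuming a flat float32 NumPy vector:

import base64

import numpy as np

def embedding_to_base64(embedding: np.ndarray) -> str:
    # Serialize the raw float32 buffer and base64-encode it so it is text-safe.
    return base64.b64encode(embedding.astype(np.float32).tobytes()).decode("ascii")

def base64_to_embedding(embeddings_b64: str) -> np.ndarray:
    # Reverse the encoding: text -> bytes -> float32 vector.
    return np.frombuffer(base64.b64decode(embeddings_b64), dtype=np.float32)

vec = np.random.rand(768).astype(np.float32)  # CLIP ViT-L/14 embeddings are 768-d
assert np.allclose(vec, base64_to_embedding(embedding_to_base64(vec)))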
@@ -293,9 +290,9 @@ with gr.Blocks() as demo:
         with gr.Column(scale=5):
             gr.Markdown(
 """
-# Soho-Clip
+# Soho-Clip Embeddings Explorer
 
-A tool for exploring CLIP embedding
+A tool for exploring CLIP embedding space.
 
 Try uploading a few images and/or add some text prompts and click generate images.
 """)
@@ -365,16 +362,10 @@ Try uploading a few images and/or add some text prompts and click generate image
     with gr.Accordion(f"Average embeddings in base 64", open=False):
         average_embedding_base64 = gr.Textbox(show_label=False)
     with gr.Row():
-        submit = gr.Button("Search embedding space")
-    with gr.Row():
-        with gr.Column(scale=1, min_width=200):
-            scale = gr.Slider(0, 25, value=3, step=1, label="Guidance scale")
         with gr.Column(scale=1, min_width=200):
             n_samples = gr.Slider(1, 16, value=4, step=1, label="Number images")
-        with gr.Column(scale=
-
-        with gr.Column(scale=1, min_width=200):
-            seed = gr.Number(None, label="Seed (blank = random)", precision=0)
+        with gr.Column(scale=3, min_width=200):
+            submit = gr.Button("Search embedding space")
     with gr.Row():
         output = gr.Gallery(label="Generated variations")
 
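A note on the layout change: inside a `gr.Row`, each `gr.Column`'s `scale` sets its relative width, so the button's new column gets roughly three times the width of the slider's. A self-contained sketch of just this row (illustrative only, not the full app):

import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=1, min_width=200):
            n_samples = gr.Slider(1, 16, value=4, step=1, label="Number images")
        with gr.Column(scale=3, min_width=200):
            submit = gr.Button("Search embedding space")

demo.launch()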
@@ -391,7 +382,7 @@ Try uploading a few images and/or add some text prompts and click generate image
     average_embedding_base64.change(on_embeddings_changed_update_plot, average_embedding_base64, average_embedding_plot)
 
 #    submit.click(main, inputs= [embedding_base64s[0], scale, n_samples, steps, seed], outputs=output)
-    submit.click(main, inputs= [average_embedding_base64, scale, n_samples, steps, seed], outputs=output)
+    submit.click(main, inputs= [average_embedding_base64, n_samples], outputs=output)
     output.style(grid=2)
 
     with gr.Row():
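The components listed in `inputs` are passed positionally to the handler, so dropping `scale`, `steps`, and `seed` from `main` forces the same trim in the event wiring. A sketch of the pattern with a placeholder handler (the real `main` decodes the embedding and returns generated images):

import gradio as gr
import numpy as np

def main(embeddings, n_samples=4):
    # Placeholder: the real handler decodes the base64 embedding first.
    return [np.zeros((64, 64, 3), dtype=np.uint8) for _ in range(int(n_samples))]

with gr.Blocks() as demo:
    average_embedding_base64 = gr.Textbox(show_label=False)
    n_samples = gr.Slider(1, 16, value=4, step=1, label="Number images")
    submit = gr.Button("Search embedding space")
    output = gr.Gallery(label="Generated variations")
    # Each component in `inputs` maps onto one parameter of `main`, in order.
    submit.click(main, inputs=[average_embedding_base64, n_samples], outputs=output)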
@@ -403,18 +394,15 @@ My interest is to use CLIP for image/video understanding (see [CLIP_visual-spati
 ### Initial Features
 
 - Combine up to 10 Images and/or text inputs to create an average embedding space.
--
-- Generate a new image based on the average embedding space
+- Search the LAION-5B images via a kNN search
 
 ### Known limitations
 
--
-- It can only generate a single image at a time
-- Not easy to use the sample images
+- ...
 
 ### Acknowledgements
 
-- I heavily build on
+- I heavily build on [clip-retrieval](https://rom1504.github.io/clip-retrieval/) and use their API. Please [cite](https://github.com/rom1504/clip-retrieval#citation) the authors if you use this work.
 - [CLIP](https://openai.com/blog/clip/)
 - [Stable Diffusion](https://github.com/CompVis/stable-diffusion)
 
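The new feature line and acknowledgement point at clip-retrieval's hosted LAION-5B kNN index. A minimal sketch of querying that service with their Python client; the URL and index name follow the clip-retrieval README and may change, and `embedding_input` takes a plain list of floats:

from clip_retrieval.clip_client import ClipClient

# Hosted LAION-5B kNN backend, per the clip-retrieval README.
client = ClipClient(
    url="https://knn.laion.ai/knn-service",
    indice_name="laion5B-L-14",
    num_images=8,
)

# Query by text, or pass a precomputed CLIP embedding instead:
#   results = client.query(embedding_input=average_embedding.tolist())
results = client.query(text="an orange tabby cat")
print(results[0]["url"], results[0]["similarity"])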