Spaces:
Build error
Fix: Update deprecated APIs and dependencies
## Fix: Update Space to work with current Gradio/dependencies
This PR fixes the following issues preventing the Space from running:
- Updated `sdk_version` from `3.0.17` to `5.29.1`
- Removed deprecated `enable_queue` parameter
- Replaced deprecated `gr.inputs.*` with `gr.*`
- Replaced deprecated `gr.outputs.*` with `gr.*`
### Details
The Space is currently showing as `BUILD_ERROR`. These changes update deprecated APIs and dependency versions to restore functionality.
---
*This fix was generated by [smolagents/ml-agent](https://huggingface.co/smolagents/ml-agent) 🤖*
README.md
CHANGED
|
@@ -4,7 +4,7 @@ emoji: 🦀
|
|
| 4 |
colorFrom: red
|
| 5 |
colorTo: blue
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version:
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
license: bsd-3-clause
|
|
|
|
| 4 |
colorFrom: red
|
| 5 |
colorTo: blue
|
| 6 |
sdk: gradio
|
| 7 |
+
sdk_version: 5.29.1
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
license: bsd-3-clause
|
app.py
CHANGED
|
@@ -59,12 +59,12 @@ def inference(raw_image, model_n, question, strategy):
|
|
| 59 |
|
| 60 |
inputs = [
|
| 61 |
gr.Image(type='pil', interactive=False),
|
| 62 |
-
gr.
|
| 63 |
type="value",
|
| 64 |
default="Image Captioning",
|
| 65 |
label="Task"
|
| 66 |
-
),gr.
|
| 67 |
-
outputs = gr.
|
| 68 |
|
| 69 |
title = "BLIP"
|
| 70 |
|
|
@@ -72,4 +72,4 @@ description = "Gradio demo for BLIP: Bootstrapping Language-Image Pre-training f
|
|
| 72 |
|
| 73 |
article = """<p style='text-align: center'><a href='https://arxiv.org/abs/2201.12086' target='_blank'>BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation</a> | <a href='https://github.com/salesforce/BLIP' target='_blank'>Github Repo</a></p>"""
|
| 74 |
|
| 75 |
-
gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=[['starrynight.jpeg',"Image Captioning","None","Nucleus sampling"]]).launch(
|
|
|
|
| 59 |
|
| 60 |
inputs = [
|
| 61 |
gr.Image(type='pil', interactive=False),
|
| 62 |
+
gr.Radio(choices=['Image Captioning',"Visual Question Answering"],
|
| 63 |
type="value",
|
| 64 |
default="Image Captioning",
|
| 65 |
label="Task"
|
| 66 |
+
),gr.Textbox(lines=2, label="Question"),gr.Radio(choices=['Beam search','Nucleus sampling'], type="value", default="Nucleus sampling", label="Caption Decoding Strategy")]
|
| 67 |
+
outputs = gr.Textbox(label="Output")
|
| 68 |
|
| 69 |
title = "BLIP"
|
| 70 |
|
|
|
|
| 72 |
|
| 73 |
article = """<p style='text-align: center'><a href='https://arxiv.org/abs/2201.12086' target='_blank'>BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation</a> | <a href='https://github.com/salesforce/BLIP' target='_blank'>Github Repo</a></p>"""
|
| 74 |
|
| 75 |
+
gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=[['starrynight.jpeg',"Image Captioning","None","Nucleus sampling"]]).launch()
|