from nbdev.export import nb_export
nb_export('ObjectDetectionWithAzureCustomVision_Part_3', lib_path='.', name='app')
Part 3. Deploy Gradio Web App
Introduction
After training the model in Azure for one hour (free tier) and publishing it, we end up with a Prediction URL.
We are going to use that prediction endpoint to run the inference.
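If you want a quick sanity check of the published endpoint before building the app, you can call the prediction REST API directly with requests. This is a minimal sketch, not the app code: the endpoint, key, project ID, model name and test image below are placeholders, and the URL pattern should match the Prediction URL shown in the Custom Vision portal.

import requests

# Placeholders - copy the real values from the "Prediction URL" dialog in the portal
endpoint = "https://YOUR_RESOURCE.cognitiveservices.azure.com"
prediction_key = "YOUR_PREDICTION_KEY"
project_id = "YOUR_PROJECT_ID"
model_name = "YOUR_PUBLISHED_MODEL"

url = f"{endpoint}/customvision/v3.0/Prediction/{project_id}/detect/iterations/{model_name}/image"
headers = {"Prediction-Key": prediction_key, "Content-Type": "application/octet-stream"}

with open("test.jpg", "rb") as f:  # any local test image
    response = requests.post(url, headers=headers, data=f)

for pred in response.json()["predictions"]:
    print(pred["tagName"], round(pred["probability"] * 100), "%")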
Here is the App already published for you to try:
Telecom-Object-Detection
And here is the repository:
https://huggingface.co/spaces/fmussari/Telecom-Object-Detection/tree/main
Tutorial Parts
- Part 1 covered:
- Creating a free Azure Custom Vision Service.
- Uploading the images to the service.
- Part 2 covered:
- Analyzing what happens to the images after uploading.
- Labeling the images using the Smart Labeler.
- Training and testing the model.
- Part 3 (this part) covers:
- Creating a Hugging Face Gradio demo.
References
Part 3.1. Publishing a Gradio App
Gradio is a great tool for demoing machine learning models. The model is already deployed in Azure, so our Gradio app is going to be the front end that connects to that prediction endpoint. What I mean is that the model itself is not going to be deployed in Hugging Face Spaces, as it would be in the usual workflow.
If you are new to Gradio, I encourage you to start from the Quickstart.
The Gradio demo was created from a Jupyter Notebook with nbdev, a great tool from fast.ai. You can start learning the basics here: Create A 🤗 Space From A Notebook
In both tutorials you will find the instructions to set up a Gradio-enabled Space in Hugging Face.
This code is based on and adapted from:
- https://github.com/MicrosoftLearning/AI-102-AIEngineer/blob/master/18-object-detection/Python/test-detector/test-detector.py
- https://huggingface.co/spaces/Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS/blob/main/app.py
Install and import libraries
#|export
import gradio as gr
import numpy as np
import os
import io
import requests, validators
from pathlib import Path
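The notebook does not show the install step, but for the Space to build you need a requirements.txt next to app.py. Based on the imports in this notebook (including the Azure and plotting libraries imported in the next cell), it looks roughly like this; the version pin on gradio is my assumption, since the Blocks code below uses Gradio 3.x APIs such as gr.Image.update and shape=:

# requirements.txt (sketch - adjust versions as needed)
gradio<4
numpy
requests
validators
azure-cognitiveservices-vision-customvision
msrest
python-dotenv
matplotlib
Pillow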
Azure Functions
#| export
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
from dotenv import load_dotenv
Environment variables
Update the configuration variables in the .env file, which contains:
PredictionEndpoint=YOUR_PREDICTION_ENDPOINT
PredictionKey=YOUR_PREDICTION_KEY
ProjectID=YOUR_PROJECT_ID
ModelName=YOUR_PUBLISHED_MODEL
We also need to create these environment variables in the Hugging Face Spaces repository under Settings -> Repo Secrets.
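Locally, these values are read from the .env file; on Hugging Face Spaces, the repo secrets are injected as environment variables, so the same os.getenv calls work in both places. A minimal sanity check, assuming the variable names above:

import os
from dotenv import load_dotenv

load_dotenv()  # reads .env locally; does nothing on Spaces, where no .env exists
for var in ('PredictionEndpoint', 'PredictionKey', 'ProjectID', 'ModelName'):
    assert os.getenv(var), f"Missing environment variable: {var}"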
Credentials and services
#| export
def fig2img(fig):
    buf = io.BytesIO()
    fig.savefig(buf)
    buf.seek(0)
    img = Image.open(buf)
    return img
def custom_vision_detect_objects(image_file: Path):
    dpi = 100

    # Get configuration settings
    load_dotenv()
    prediction_endpoint = os.getenv('PredictionEndpoint')
    prediction_key = os.getenv('PredictionKey')
    project_id = os.getenv('ProjectID')
    model_name = os.getenv('ModelName')

    # Authenticate a client for the prediction API
    credentials = ApiKeyCredentials(in_headers={"Prediction-key": prediction_key})
    prediction_client = CustomVisionPredictionClient(
        endpoint=prediction_endpoint, credentials=credentials)

    # Load image and get height, width and channels
    # image_file = 'produce.jpg'
    print('Detecting objects in', image_file)
    image = Image.open(image_file)
    h, w, ch = np.array(image).shape

    # Detect objects in the test image
    with open(image_file, mode="rb") as image_data:
        results = prediction_client.detect_image(project_id, model_name, image_data)

    # Create a figure for the results
    fig = plt.figure(figsize=(w/dpi, h/dpi))
    plt.axis('off')

    # Display the image with boxes around each detected object
    draw = ImageDraw.Draw(image)
    lineWidth = int(w/800)
    color = 'cyan'

    for prediction in results.predictions:
        # Only show objects with a > 50% probability
        if (prediction.probability * 100) > 50:
            # Box coordinates and dimensions are proportional - convert to absolutes
            left = prediction.bounding_box.left * w
            top = prediction.bounding_box.top * h
            height = prediction.bounding_box.height * h
            width = prediction.bounding_box.width * w

            # Draw the box
            points = ((left, top), (left+width, top),
                      (left+width, top+height), (left, top+height),
                      (left, top))
            draw.line(points, fill=color, width=lineWidth)

            # Add the tag name and probability
            plt.annotate(
                prediction.tag_name + ": {0:.0f}%".format(prediction.probability * 100),
                (left, top - 1.372*h/dpi),
                backgroundcolor=color,
                fontsize=max(w/dpi, h/dpi),
                fontfamily='monospace'
            )

    plt.imshow(image)
    plt.tight_layout(pad=0)

    # Alternatively, the annotated figure could be saved to disk:
    # outputfile = 'output.jpg'
    # fig.savefig(outputfile)
    # print('Results saved in', outputfile)

    return fig2img(fig)
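A quick local test of the function, assuming a valid .env and at least one .jpg in the images/ folder (the output filename is just illustrative):

sample = sorted(Path('images').rglob('*.jpg'))[0]
annotated = custom_vision_detect_objects(sample)  # returns a PIL Image via fig2img
annotated.save('detection_preview.jpg')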
Gradio
#| export
= """<h1 id="title">Telecom Object Detection with Azure Custom Vision</h1>"""
title
= """
css h1#title {
text-align: center;
}
"""
Example images and URL to be used in the App
#| export
= ["https://www.dropbox.com/s/y5bk8om5ucu46d3/747.jpg?dl=1"]
urls = [path.as_posix() for path in sorted(Path('images').rglob('*.jpg'))]
imgs = [[path.as_posix()] for path in sorted(Path('images').rglob('*.jpg'))] img_samples
Functions for the Gradio App
#| export
def set_example_url(example: list) -> dict:
    print(gr.Textbox.update(value=example[0]))
    return gr.Textbox.update(value=example[0])

def set_example_image(example: list) -> dict:
    return gr.Image.update(value=example[0])

def detect_objects(url_input: str, image_input: Image.Image):
    print(f"{url_input=}")

    if validators.url(url_input):
        image = Image.open(requests.get(url_input, stream=True).raw)
    elif image_input:
        image = image_input

    print(image)
    print(image.size)

    w, h = image.size

    # Downscale large images before sending them to the prediction endpoint
    if max(w, h) > 1_200:
        factor = 1_200 / max(w, h)
    else:
        factor = 1
    size = (int(w*factor), int(h*factor))
    # Image.Resampling requires Pillow >= 9.1
    image = image.resize(size, resample=Image.Resampling.BILINEAR)

    resized_image_path = "input_object_detection.jpg"
    image.save(resized_image_path)

    return custom_vision_detect_objects(resized_image_path)
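Note that Image.Resampling was added in Pillow 9.1. If the Space happens to build with an older Pillow, a small fallback keeps the resize working (a defensive sketch, not part of the original app):

try:
    RESAMPLE_BILINEAR = Image.Resampling.BILINEAR  # Pillow >= 9.1
except AttributeError:
    RESAMPLE_BILINEAR = Image.BILINEAR  # older Pillow versions

# then: image = image.resize(size, resample=RESAMPLE_BILINEAR)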
#| export
with gr.Blocks(css=css) as demo:
    gr.Markdown(title)

    with gr.Tabs():
        with gr.TabItem("Image Upload"):
            with gr.Row():
                image_input = gr.Image(type='pil')
                image_output = gr.Image(shape=(650, 650))
            with gr.Row():
                example_images = gr.Dataset(components=[image_input], samples=img_samples)
            image_button = gr.Button("Detect")

        with gr.TabItem("Image URL"):
            with gr.Row():
                url_input = gr.Textbox(lines=2, label='Enter valid image URL here..')
                img_output_from_url = gr.Image(shape=(650, 650))
            with gr.Row():
                example_url = gr.Dataset(components=[url_input], samples=[[str(url)] for url in urls])
            url_button = gr.Button("Detect")

    url_button.click(detect_objects, inputs=[url_input, image_input], outputs=img_output_from_url)
    image_button.click(detect_objects, inputs=[url_input, image_input], outputs=image_output)

    example_url.click(fn=set_example_url, inputs=[example_url], outputs=[url_input])
    example_images.click(fn=set_example_image, inputs=[example_images], outputs=[image_input])

demo.launch()
To publish the script app.py from the notebook, run the nb_export cell shown at the top of this notebook.
Conclusions
- Gradio is great for publishing our demo Apps.