Gradio Examples

Examples of quickly deploying machine learning models with Gradio to build interactive web interfaces with minimal code.

💻 Basic Hello World python

🟢 simple

A simple introductory example of the Gradio interface

# Gradio Hello World Examples
# Quick start with basic Gradio interfaces

import gradio as gr
import time
import random

# 1. Basic Interface
def hello_world(name):
    return f"Hello, {name}! Welcome to Gradio."

# Create simple interface
iface = gr.Interface(
    fn=hello_world,
    inputs=gr.Textbox(label="Enter your name"),
    outputs=gr.Textbox(label="Greeting"),
    title="Hello World",
    description="Simple greeting interface"
)

# 2. Multiple Inputs and Outputs
def calculator(operation, num1, num2):
    """Basic calculator with multiple operations"""
    try:
        num1, num2 = float(num1), float(num2)

        if operation == "add":
            return f"Result: {num1 + num2}"
        elif operation == "subtract":
            return f"Result: {num1 - num2}"
        elif operation == "multiply":
            return f"Result: {num1 * num2}"
        elif operation == "divide":
            if num2 != 0:
                return f"Result: {num1 / num2}"
            else:
                return "Error: Cannot divide by zero"
        else:
            return "Error: Invalid operation"
    except (TypeError, ValueError):
        return "Error: Please enter valid numbers"

calculator_interface = gr.Interface(
    fn=calculator,
    inputs=[
        gr.Radio(
            choices=["add", "subtract", "multiply", "divide"],
            label="Operation",
            value="add"
        ),
        gr.Number(label="First Number"),
        gr.Number(label="Second Number")
    ],
    outputs=gr.Textbox(label="Result"),
    title="Simple Calculator",
    description="Perform basic arithmetic operations"
)

# 3. Image Processing Interface
def apply_filter(image, filter_type):
    """Apply basic filters to images"""
    if image is None:
        return None

    # Requires opencv-python; imported lazily so the other demos run without it
    import cv2
    import numpy as np

    # Ensure we have a numpy array (handles both PIL images and numpy input)
    img_array = np.array(image)

    if filter_type == "grayscale":
        gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
        return cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
    elif filter_type == "blur":
        return cv2.GaussianBlur(img_array, (15, 15), 0)
    elif filter_type == "edge detection":
        gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
        edges = cv2.Canny(gray, 100, 200)
        return cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    else:
        return img_array

image_interface = gr.Interface(
    fn=apply_filter,
    inputs=[
        gr.Image(label="Upload Image"),
        gr.Radio(
            choices=["none", "grayscale", "blur", "edge detection"],
            label="Filter Type",
            value="none"
        )
    ],
    outputs=gr.Image(label="Processed Image"),
    title="Basic Image Filter",
    description="Apply simple filters to your images"
)

# 4. Custom Function with Progress
def slow_function(text, progress=gr.Progress()):
    """Function that takes time with progress indication"""
    total_steps = 10
    result = ""

    for i in range(total_steps):
        time.sleep(0.1)  # Simulate work
        progress((i + 1) / total_steps, f"Processing step {i+1}/{total_steps}")
        result += f"Step {i+1}: Processing '{text}'\n"

    return result + "\nProcessing complete!"
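
# Variant (sketch): gr.Progress also exposes a tqdm-style wrapper, so the loop
# above could instead be written as:
#     for i in progress.tqdm(range(total_steps), desc="Processing"):
#         time.sleep(0.1)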

progress_interface = gr.Interface(
    fn=slow_function,
    inputs=gr.Textbox(label="Text to Process"),
    outputs=gr.Textbox(label="Processing Result"),
    title="Progress Example",
    description="See progress tracking in action"
)

# 5. Using Blocks for More Complex Layouts
def create_blocks_demo():
    """Create a more complex interface using Blocks"""

    def process_data(name, age, country, accept_terms):
        if not accept_terms:
            return "Please accept the terms and conditions"

        return f"""
        Registration Successful!
        Name: {name}
        Age: {age}
        Country: {country}
        Timestamp: {time.strftime('%Y-%m-%d %H:%M:%S')}
        """

    with gr.Blocks(title="Complex Interface") as demo:
        gr.Markdown("# Registration Form")
        gr.Markdown("Fill out the form below to register:")

        with gr.Row():
            with gr.Column():
                name_input = gr.Textbox(label="Full Name", placeholder="Enter your name")
                age_input = gr.Number(label="Age", minimum=0, maximum=120)
            with gr.Column():
                country_input = gr.Dropdown(
                    choices=["USA", "UK", "Canada", "Australia", "Other"],
                    label="Country"
                )
                terms_checkbox = gr.Checkbox(label="I accept the terms and conditions")

        submit_btn = gr.Button("Submit", variant="primary")
        output_text = gr.Textbox(label="Result", interactive=False)

        submit_btn.click(
            process_data,
            inputs=[name_input, age_input, country_input, terms_checkbox],
            outputs=output_text
        )

    return demo

# 6. Examples Gallery
def text_sentiment(text):
    """Mock sentiment analysis"""
    positive_words = ["good", "great", "excellent", "amazing", "wonderful"]
    negative_words = ["bad", "terrible", "awful", "horrible", "worst"]

    text_lower = text.lower()
    positive_count = sum(1 for word in positive_words if word in text_lower)
    negative_count = sum(1 for word in negative_words if word in text_lower)

    if positive_count > negative_count:
        sentiment = "Positive 😊"
        score = min(0.9, 0.5 + (positive_count - negative_count) * 0.1)
    elif negative_count > positive_count:
        sentiment = "Negative 😔"
        score = max(0.1, 0.5 - (negative_count - positive_count) * 0.1)
    else:
        sentiment = "Neutral 😐"
        score = 0.5

    # gr.Label expects a mapping of label -> confidence
    return {sentiment: score}
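
# Sketch: the keyword heuristic above could be replaced by a real model, e.g.
# the transformers sentiment pipeline (downloads a model on first use):
#     from transformers import pipeline
#     sentiment_pipe = pipeline("sentiment-analysis")
#     def text_sentiment(text):
#         result = sentiment_pipe(text)[0]
#         return {result["label"]: result["score"]}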

sentiment_interface = gr.Interface(
    fn=text_sentiment,
    inputs=gr.Textbox(
        label="Enter text for sentiment analysis",
        placeholder="Type your text here...",
        lines=3
    ),
    outputs=gr.Label(label="Sentiment Analysis"),
    title="Sentiment Analysis Demo",
    description="Analyze the sentiment of your text",
    examples=[
        ["I love this product! It's absolutely amazing and works perfectly."],
        ["This is terrible. The worst experience I've ever had."],
        ["The weather is okay today, nothing special."],
        ["Great service, friendly staff, and excellent food quality!"]
    ]
)

if __name__ == "__main__":
    # Launch different interfaces
    print("Choose an interface to launch:")
    print("1. Hello World")
    print("2. Calculator")
    print("3. Image Filter")
    print("4. Progress Demo")
    print("5. Complex Form (Blocks)")
    print("6. Sentiment Analysis")

    choice = input("Enter choice (1-6): ")

    if choice == "1":
        iface.launch()
    elif choice == "2":
        calculator_interface.launch()
    elif choice == "3":
        image_interface.launch()
    elif choice == "4":
        progress_interface.launch()
    elif choice == "5":
        demo = create_blocks_demo()
        demo.launch()
    elif choice == "6":
        sentiment_interface.launch()
    else:
        print("Invalid choice. Launching Hello World...")
        iface.launch()
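
# Tip (sketch): instead of the menu prompt above, the interfaces can also be
# served together in a single app with gr.TabbedInterface, e.g.
#     gr.TabbedInterface(
#         [iface, calculator_interface, image_interface],
#         ["Hello World", "Calculator", "Image Filter"]
#     ).launch()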

💻 Image Classification Interface python

🟡 intermediate

Create an interactive interface for image classification models

# Image Classification with Gradio
# Complete implementation with multiple model options

import gradio as gr
import torch
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
import pandas as pd
import time
import json

# 1. Configuration and Model Setup
CLASS_LABELS = {
    'imagenet': {
        # Note: demo labels only (CIFAR-10-style classes, not the full ImageNet set)
        'airplane': 0, 'automobile': 1, 'bird': 2, 'cat': 3, 'deer': 4,
        'dog': 5, 'frog': 6, 'horse': 7, 'ship': 8, 'truck': 9
    },
    'custom': {
        'cat': '🐱', 'dog': '🐕', 'bird': '🐦', 'fish': '🐠', 'rabbit': '🐰',
        'hamster': '🐹', 'turtle': '🐢', 'butterfly': '🦋', 'spider': '🕷️', 'snake': '🐍'
    }
}

# 2. Image Preprocessing
def preprocess_image(image, target_size=(224, 224)):
    """Preprocess image for model input"""
    if image is None:
        return None

    # Define transforms
    transform = transforms.Compose([
        transforms.Resize(target_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                           std=[0.229, 0.224, 0.225])
    ])

    # Convert PIL to RGB if necessary
    if image.mode != 'RGB':
        image = image.convert('RGB')

    # Apply transforms
    image_tensor = transform(image).unsqueeze(0)
    return image_tensor

# 3. Mock Model Class (Replace with actual model)
class MockImageClassifier:
    """Mock classifier for demonstration"""

    def __init__(self, model_type='custom'):
        self.model_type = model_type
        self.labels = CLASS_LABELS[model_type]

    def predict(self, image_tensor):
        """Mock prediction - replace with actual model inference"""
        time.sleep(0.5)  # Simulate processing time

        # Generate random predictions
        num_classes = len(self.labels)
        scores = np.random.dirichlet(np.ones(num_classes))

        # Create prediction dictionary
        predictions = {}
        for i, (label, _) in enumerate(self.labels.items()):
            predictions[label] = float(scores[i])

        return predictions
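
# Sketch of a real drop-in replacement for MockImageClassifier, assuming
# torchvision >= 0.13 (weights API) and network access for the first weight
# download. preprocess_image() above already applies the matching ImageNet
# preprocessing (224x224 resize, ImageNet mean/std).
#
# from torchvision.models import resnet18, ResNet18_Weights
#
# class TorchvisionClassifier:
#     def __init__(self):
#         weights = ResNet18_Weights.DEFAULT
#         self.model = resnet18(weights=weights).eval()
#         self.labels = weights.meta["categories"]  # 1000 ImageNet class names
#
#     def predict(self, image_tensor):
#         with torch.no_grad():
#             probs = torch.softmax(self.model(image_tensor), dim=1)[0]
#         top = torch.topk(probs, k=5)
#         return {self.labels[int(i)]: float(p) for p, i in zip(top.values, top.indices)}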

# 4. Classification Functions
def classify_image(image, model_type, confidence_threshold=0.1):
    """Main classification function"""
    if image is None:
        return "Please upload an image", None

    try:
        # Initialize model
        model = MockImageClassifier(model_type)

        # Preprocess image
        image_tensor = preprocess_image(image)
        if image_tensor is None:
            return "Error processing image", None

        # Get predictions
        predictions = model.predict(image_tensor)

        # Filter by confidence threshold
        filtered_predictions = {
            k: v for k, v in predictions.items()
            if v >= confidence_threshold
        }

        # Sort by confidence
        sorted_predictions = dict(
            sorted(filtered_predictions.items(), key=lambda x: x[1], reverse=True)
        )

        # Format results
        if not sorted_predictions:
            return "No confident predictions found", None

        # Create result message
        top_prediction = list(sorted_predictions.items())[0]
        result_message = f"Top prediction: {top_prediction[0]} ({top_prediction[1]:.2%} confidence)"

        # Add emojis for custom model
        if model_type == 'custom':
            top_3 = list(sorted_predictions.items())[:3]
            emoji_results = []
            for label, confidence in top_3:
                emoji = CLASS_LABELS['custom'].get(label, '🔍')
                emoji_results.append(f"{emoji} {label}: {confidence:.1%}")
            result_message += "\n\nTop 3:\n" + "\n".join(emoji_results)

        return result_message, sorted_predictions

    except Exception as e:
        return f"Error during classification: {str(e)}", None

def batch_classify(files, model_type):
    """Batch classification for multiple uploaded image files"""
    if not files:
        return "No images provided", []

    rows = []
    success_count = 0
    for i, file_obj in enumerate(files):
        try:
            # gr.File returns temp-file objects (or paths); open them as PIL images
            path = file_obj.name if hasattr(file_obj, "name") else file_obj
            image = Image.open(path)
            _, predictions = classify_image(image, model_type)
            if predictions:
                top_label, top_conf = list(predictions.items())[0]
                rows.append([f"Image {i + 1}: {top_label}", f"{top_conf:.2%}", "✅"])
                success_count += 1
            else:
                rows.append([f"Image {i + 1}", "-", "⚠️ no confident prediction"])
        except Exception as e:
            rows.append([f"Image {i + 1}", "-", f"❌ {e}"])

    summary = f"Processed {success_count}/{len(files)} images"
    return summary, rows

# 5. Visualization Functions
def create_confidence_chart(predictions):
    """Create confidence visualization for gr.BarPlot"""
    if not predictions:
        return None

    # Top 5 predictions as a DataFrame whose columns match the BarPlot axes
    labels = list(predictions.keys())[:5]
    values = [predictions[label] for label in labels]

    return pd.DataFrame({'label': labels, 'confidence': values})

def create_prediction_details(predictions):
    """Detailed predictions table"""
    if not predictions:
        return []

    details = []
    for label, confidence in predictions.items():
        details.append([label, f"{confidence:.2%}", "✅" if confidence > 0.5 else "⚠️"])

    return details

# 6. Interface Creation
def create_classification_interface():
    """Create the main classification interface"""

    with gr.Blocks(title="Image Classification Demo") as demo:
        gr.Markdown("# 🖼️ Image Classification Demo")
        gr.Markdown("Upload an image to classify it using different models")

        with gr.Row():
            with gr.Column(scale=2):
                # Input section
                image_input = gr.Image(
                    label="Upload Image",
                    type="pil",
                    height=300
                )

                model_choice = gr.Radio(
                    choices=["custom", "imagenet"],
                    label="Model Type",
                    value="custom"
                )

                confidence_slider = gr.Slider(
                    minimum=0.0,
                    maximum=1.0,
                    value=0.1,
                    step=0.05,
                    label="Confidence Threshold"
                )

                classify_btn = gr.Button("Classify Image", variant="primary")

            with gr.Column(scale=2):
                # Results section
                result_text = gr.Textbox(
                    label="Classification Result",
                    interactive=False,
                    lines=3
                )

                predictions_output = gr.Label(
                    label="All Predictions",
                    num_top_classes=10
                )

        with gr.Row():
            # Visualization
            confidence_chart = gr.BarPlot(
                title="Top 5 Predictions",
                x="label",
                y="confidence",
                height=300
            )

            details_table = gr.Dataframe(
                headers=["Class", "Confidence", "Status"],
                label="Detailed Results",
                datatype=["str", "str", "str"]
            )

        # Batch processing section
        with gr.Accordion("Batch Processing", open=False):
            gr.Markdown("Upload multiple images for batch classification")

            batch_images = gr.File(
                file_count="multiple",
                file_types=["image"],
                label="Upload Multiple Images"
            )

            batch_btn = gr.Button("Process Batch", variant="secondary")
            batch_result = gr.Textbox(label="Batch Result", interactive=False)

        # Example images
        gr.Examples(
            examples=[
                ["examples/cat.jpg", "custom"],
                ["examples/dog.jpg", "custom"],
                # Add more example paths as needed
            ],
            inputs=[image_input, model_choice],
            label="Example Images"
        )

        # Event handlers: run classification once and build every output view from it
        def classify_and_visualize(image, model_type, threshold):
            message, predictions = classify_image(image, model_type, threshold)
            chart = create_confidence_chart(predictions)
            details = create_prediction_details(predictions)
            return message, predictions or {}, chart, details

        classify_btn.click(
            classify_and_visualize,
            inputs=[image_input, model_choice, confidence_slider],
            outputs=[result_text, predictions_output, confidence_chart, details_table]
        )

        batch_btn.click(
            batch_classify,
            inputs=[batch_images, model_choice],
            outputs=[batch_result, details_table]
        )

    return demo

# 7. Advanced Features Interface
def create_advanced_interface():
    """Advanced interface with more features"""

    def analyze_image_properties(image):
        """Analyze image properties"""
        if image is None:
            return "No image uploaded", None

        # Get image properties
        width, height = image.size
        mode = image.mode
        format_name = image.format

        properties = f"""
        Image Properties:
        - Size: {width}x{height} pixels
        - Mode: {mode}
        - Format: {format_name}
        - Aspect Ratio: {width/height:.2f}
        """

        # Create visualization
        return properties, image

    def compare_models(image):
        """Compare multiple model predictions"""
        if image is None:
            return {}

        models = ["custom", "imagenet"]
        comparison = {}

        for model in models:
            _, predictions = classify_image(image, model)
            if predictions:
                top_pred = list(predictions.items())[0]
                comparison[f"{model}_model"] = {
                    "prediction": top_pred[0],
                    "confidence": top_pred[1]
                }

        return comparison

    with gr.Blocks(title="Advanced Image Classification") as demo:
        gr.Markdown("# 🔬 Advanced Image Analysis")

        with gr.Tabs():
            with gr.TabItem("Classification"):
                create_classification_interface()

            with gr.TabItem("Image Analysis"):
                with gr.Row():
                    img_input = gr.Image(type="pil", label="Upload Image")
                    with gr.Column():
                        props_text = gr.Textbox(label="Properties", interactive=False)
                        processed_img = gr.Image(label="Processed")

                img_input.change(
                    analyze_image_properties,
                    inputs=img_input,
                    outputs=[props_text, processed_img]
                )

            with gr.TabItem("Model Comparison"):
                comp_img = gr.Image(type="pil", label="Upload Image")
                comp_btn = gr.Button("Compare Models")
                comp_results = gr.JSON(label="Comparison Results")

                comp_btn.click(
                    compare_models,
                    inputs=comp_img,
                    outputs=comp_results
                )

    return demo

if __name__ == "__main__":
    print("Choose interface mode:")
    print("1. Basic Classification")
    print("2. Advanced Features")

    choice = input("Enter choice (1-2): ")

    if choice == "2":
        demo = create_advanced_interface()
    else:
        demo = create_classification_interface()

    demo.launch(share=True, debug=True)

💻 Chatbot Interface python

🟡 intermediate

Build an interactive chatbot interface

# Gradio Chatbot Interface
# Complete chatbot with memory, streaming, and multiple AI models

import gradio as gr
import time
import random
import json
from datetime import datetime
from typing import List, Dict, Any, Optional

# 1. Chatbot Data Models
class Message:
    """Message model for chat conversations"""

    def __init__(self, role: str, content: str, timestamp: Optional[datetime] = None):
        self.role = role  # 'user' or 'assistant'
        self.content = content
        self.timestamp = timestamp or datetime.now()

    def to_dict(self):
        return {
            "role": self.role,
            "content": self.content,
            "timestamp": self.timestamp.isoformat()
        }

class ChatHistory:
    """Manage chat conversation history"""

    def __init__(self, max_messages: int = 50):
        self.messages: List[Message] = []
        self.max_messages = max_messages

    def add_message(self, role: str, content: str):
        """Add a new message to history"""
        message = Message(role, content)
        self.messages.append(message)

        # Limit history size
        if len(self.messages) > self.max_messages:
            self.messages = self.messages[-self.max_messages:]

    def get_conversation_context(self, last_n: int = 5) -> str:
        """Get formatted conversation context"""
        recent_messages = self.messages[-last_n:]
        context = ""

        for msg in recent_messages:
            prefix = "User" if msg.role == "user" else "Assistant"
            context += f"{prefix}: {msg.content}\n"

        return context

    def clear(self):
        """Clear all messages"""
        self.messages.clear()

    def to_gradio_format(self) -> List[List[str]]:
        """Convert to Gradio chatbot format"""
        formatted = []
        for msg in self.messages:
            if msg.role == "user":
                formatted.append([msg.content, None])
            else:
                if formatted and formatted[-1][1] is None:
                    formatted[-1][1] = msg.content
                else:
                    formatted.append([None, msg.content])
        return formatted
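
    def to_messages_format(self) -> List[Dict[str, str]]:
        """Sketch: role/content dicts for gr.Chatbot(type='messages').

        Assumes a Gradio version that supports the 'messages' chat format; the
        tuple format from to_gradio_format() is what this example uses elsewhere.
        """
        return [{"role": m.role, "content": m.content} for m in self.messages]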

# 2. Response Generators
class AIResponseGenerator:
    """Mock AI response generator (replace with actual AI model)"""

    def __init__(self, model_name: str = "gpt-3.5-turbo"):
        self.model_name = model_name
        self.responses = {
            "greeting": [
                "Hello! How can I help you today?",
                "Hi there! What can I do for you?",
                "Greetings! How may I assist you?"
            ],
            "weather": [
                "I don't have access to real-time weather data, but I recommend checking a weather app or website for current conditions.",
                "Weather information isn't available to me directly. Try using a weather service for accurate forecasts."
            ],
            "help": [
                "I can help you with various tasks including answering questions, providing information, and engaging in conversation. What would you like to know?",
                "I'm here to assist you! Feel free to ask questions or start a conversation about any topic."
            ],
            "default": [
                "That's an interesting question! Let me think about that...",
                "I understand what you're asking. Here's my response:",
                "Great question! Based on what I know:",
                "Let me help you with that. I think:"
            ]
        }

        # Personality traits
        self.personality = {
            "friendly": True,
            "helpful": True,
            "professional": True
        }

    def generate_response(self, message: str, context: str = "") -> str:
        """Generate AI response based on input and context"""
        message_lower = message.lower()

        # Simple keyword-based responses
        if any(word in message_lower for word in ["hello", "hi", "hey"]):
            return random.choice(self.responses["greeting"])
        elif "weather" in message_lower:
            return random.choice(self.responses["weather"])
        elif "help" in message_lower:
            return random.choice(self.responses["help"])
        else:
            # Generate contextual response
            base_response = random.choice(self.responses["default"])

            # Add some context-aware content
            if "python" in message_lower:
                base_response += " Python is a versatile programming language great for web development, data science, and automation."
            elif "gradio" in message_lower:
                base_response += " Gradio is an excellent tool for creating machine learning demos with minimal code!"
            elif "machine learning" in message_lower:
                base_response += " Machine learning is a fascinating field that's transforming how we interact with technology."

            return base_response

    def generate_streaming_response(self, message: str, context: str = ""):
        """Generate streaming response word by word"""
        response = self.generate_response(message, context)
        words = response.split()

        partial_response = ""
        for word in words:
            partial_response += word + " "
            yield partial_response
            time.sleep(0.05)  # Simulate typing delay
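
# Note (sketch): for simple bots, Gradio's built-in gr.ChatInterface wraps most
# of the chat wiring built below; its callback takes (message, history), e.g.
#     def reply(message, history):
#         return AIResponseGenerator().generate_response(message)
#     gr.ChatInterface(fn=reply, title="Quick Chatbot").launch()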

# 3. Chatbot Functions
def respond_to_message(
    message: str,
    history: List[List[str]],
    chat_history_obj: Dict[str, Any],
    model_choice: str,
    stream: bool = False
):
    """Main response function for the chatbot (a generator, so it can stream)"""

    if not message.strip():
        yield history, "Please enter a message."
        return

    # Add user message to both the raw message list and the context object
    chat_history_obj["messages"].append(Message("user", message))
    chat_history_obj["context"].add_message("user", message)

    # Get conversation context
    conversation_context = chat_history_obj["context"].get_conversation_context()

    # Initialize response generator
    generator = AIResponseGenerator(model_choice)

    if stream:
        # Streaming response: yield the growing partial reply
        final_response = ""
        for partial_response in generator.generate_streaming_response(message, conversation_context):
            final_response = partial_response

            # Update history with the partial response
            temp_history = chat_history_obj["context"].to_gradio_format()
            if temp_history and temp_history[-1][1] is None:
                temp_history[-1][1] = partial_response
            else:
                temp_history.append([None, partial_response])

            yield temp_history, ""

        # Add the final response to history
        chat_history_obj["messages"].append(Message("assistant", final_response))
        chat_history_obj["context"].add_message("assistant", final_response)

        yield chat_history_obj["context"].to_gradio_format(), ""
    else:
        # Regular (non-streaming) response
        response = generator.generate_response(message, conversation_context)

        # Add assistant response to history
        chat_history_obj["messages"].append(Message("assistant", response))
        chat_history_obj["context"].add_message("assistant", response)

        # Convert to Gradio chatbot format
        yield chat_history_obj["context"].to_gradio_format(), ""

def clear_chat(chat_history_obj: Dict[str, Any]):
    """Clear chat history"""
    chat_history_obj["messages"].clear()
    chat_history_obj["context"].clear()
    return [], "Chat history cleared."

def save_chat(chat_history_obj: Dict[str, Any]):
    """Save chat history to file"""
    if not chat_history_obj["messages"]:
        return "No messages to save."

    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"chat_history_{timestamp}.json"

    try:
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump({
                "timestamp": datetime.now().isoformat(),
                "messages": [msg.to_dict() for msg in chat_history_obj["messages"]]
            }, f, indent=2, ensure_ascii=False)

        return f"Chat saved to {filename}"
    except Exception as e:
        return f"Error saving chat: {str(e)}"

# 4. Interface Creation
def create_chatbot_interface():
    """Create the main chatbot interface"""

    # Initialize chat history
    chat_histories = {}

    def get_chat_history(session_id: str = "default"):
        """Get or create chat history for session"""
        if session_id not in chat_histories:
            chat_histories[session_id] = {
                "messages": [],
                "context": ChatHistory()
            }
        return chat_histories[session_id]

    with gr.Blocks(title="AI Chatbot", theme=gr.themes.Soft()) as demo:
        gr.Markdown("# 🤖 AI Chatbot Interface")
        gr.Markdown("Chat with an AI assistant powered by multiple models")

        # Chat interface
        with gr.Row():
            with gr.Column(scale=3):
                # avatar_images expects image paths/URLs, so it is omitted here
                chatbot = gr.Chatbot(
                    label="Conversation",
                    height=400,
                    show_copy_button=True
                )

                with gr.Row():
                    msg_input = gr.Textbox(
                        label="Your Message",
                        placeholder="Type your message here...",
                        scale=4
                    )
                    send_btn = gr.Button("Send", variant="primary", scale=1)

            with gr.Column(scale=1):
                # Controls
                model_choice = gr.Dropdown(
                    choices=["gpt-3.5-turbo", "gpt-4", "claude-3", "local-model"],
                    label="AI Model",
                    value="gpt-3.5-turbo"
                )

                stream_checkbox = gr.Checkbox(
                    label="Streaming Response",
                    value=True
                )

                # Action buttons
                with gr.Row():
                    clear_btn = gr.Button("Clear", size="sm")
                    save_btn = gr.Button("Save", size="sm")

                status_output = gr.Textbox(
                    label="Status",
                    interactive=False,
                    lines=2
                )

        # Examples
        gr.Examples(
            examples=[
                ["Hello, how are you?"],
                ["What can you help me with?"],
                ["Tell me about Gradio"],
                ["How do I learn Python?"],
                ["What's the weather like today?"]
            ],
            inputs=msg_input,
            label="Example Messages"
        )

        # Statistics
        with gr.Accordion("Chat Statistics", open=False):
            stats_text = gr.Textbox(
                label="Session Stats",
                interactive=False,
                value="No messages yet..."
            )

        # Event handlers
        def user_input(user_message, history, model, stream_enabled):
            """Handle user input and stream the assistant reply back"""
            chat_history = get_chat_history()

            # Delegate to the generator response function so streaming works
            yield from respond_to_message(
                user_message,
                history,
                chat_history,
                model,
                stream_enabled
            )

        def update_stats():
            """Update chat statistics"""
            chat_history = get_chat_history()
            messages = chat_history["messages"]
            user_msgs = [m for m in messages if m.role == "user"]
            ai_msgs = [m for m in messages if m.role == "assistant"]

            return f"""
            Total Messages: {len(messages)}
            User Messages: {len(user_msgs)}
            AI Responses: {len(ai_msgs)}
            Session Started: {messages[0].timestamp.strftime('%H:%M:%S') if messages else 'N/A'}
            """

        # Connect events
        msg_input.submit(
            user_input,
            inputs=[msg_input, chatbot, model_choice, stream_checkbox],
            outputs=[chatbot, status_output]
        )

        send_btn.click(
            user_input,
            inputs=[msg_input, chatbot, model_choice, stream_checkbox],
            outputs=[chatbot, status_output]
        ).then(
            lambda: "",  # Clear input
            outputs=[msg_input]
        ).then(
            update_stats,
            outputs=[stats_text]
        )

        clear_btn.click(
            lambda: clear_chat(get_chat_history()),
            outputs=[chatbot, status_output]
        ).then(
            update_stats,
            outputs=[stats_text]
        )

        save_btn.click(
            lambda: save_chat(get_chat_history()),
            outputs=[status_output]
        )

    return demo

# 5. Multi-Chat Interface
def create_multi_chat_interface():
    """Interface with multiple chat rooms"""

    with gr.Blocks(title="Multi-Room Chatbot") as demo:
        gr.Markdown("# 🏢 Multi-Room Chat System")

        with gr.Tabs():
            with gr.TabItem("General Chat"):
                create_chatbot_interface()

            with gr.TabItem("Technical Support"):
                gr.Markdown("### Technical Support Assistant")
                gr.Markdown("Specialized for technical questions and troubleshooting")
                # Create specialized chatbot here
                gr.Textbox("Technical support chat coming soon...", interactive=False)

            with gr.TabItem("Creative Writing"):
                gr.Markdown("### Creative Writing Partner")
                gr.Markdown("Help with stories, poems, and creative content")
                # Create creative writing chatbot here
                gr.Textbox("Creative writing assistant coming soon...", interactive=False)

    return demo

if __name__ == "__main__":
    print("Choose chatbot mode:")
    print("1. Single Chat Interface")
    print("2. Multi-Room Chat")

    choice = input("Enter choice (1-2): ")

    if choice == "2":
        demo = create_multi_chat_interface()
    else:
        demo = create_chatbot_interface()

    demo.launch(share=True, debug=True)

💻 Data Visualization python

🔴 complex

Integrate charts and data visualization components

# Gradio Data Visualization Dashboard
# Complete dashboard with interactive charts and real-time updates

import gradio as gr
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import plotly.express as px
import time
import json
from datetime import datetime, timedelta
import random

# 1. Data Generation Functions
def generate_sales_data(days=30):
    """Generate sample sales data"""
    dates = pd.date_range(end=datetime.now(), periods=days)
    data = {
        'date': dates,
        'sales': np.random.normal(1000, 200, days) + np.sin(range(days)) * 100,
        'customers': np.random.poisson(50, days),
        'revenue': np.random.normal(5000, 1000, days),
        'category': np.random.choice(['Electronics', 'Clothing', 'Food', 'Books'], days)
    }
    return pd.DataFrame(data)
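
# Sketch: to drive the dashboard from real data instead of the synthetic
# generator above, a CSV with the same columns could be loaded (file name and
# columns here are illustrative, not part of the original example):
#     def load_sales_data(path="sales.csv"):
#         df = pd.read_csv(path, parse_dates=["date"])
#         return df[["date", "sales", "customers", "revenue", "category"]]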

def generate_realtime_data():
    """Generate real-time data point"""
    return {
        'timestamp': datetime.now(),
        'value': random.gauss(100, 15),
        'category': random.choice(['A', 'B', 'C']),
        'status': random.choice(['active', 'inactive'])
    }

# 2. Chart Creation Functions
def create_line_chart(data):
    """Create interactive line chart"""
    if data.empty:
        return None

    fig = px.line(
        data,
        x='date',
        y='sales',
        title='Sales Trend Over Time',
        labels={'sales': 'Sales Amount', 'date': 'Date'}
    )

    fig.update_layout(
        hovermode='x unified',
        showlegend=True,
        height=400
    )

    return fig

def create_bar_chart(data):
    """Create category-wise bar chart"""
    if data.empty:
        return None

    category_data = data.groupby('category').agg({
        'sales': 'sum',
        'customers': 'sum'
    }).reset_index()

    fig = go.Figure()

    fig.add_trace(go.Bar(
        x=category_data['category'],
        y=category_data['sales'],
        name='Total Sales',
        marker_color='lightblue'
    ))

    fig.add_trace(go.Bar(
        x=category_data['category'],
        y=category_data['customers'],
        name='Total Customers',
        marker_color='lightgreen',
        yaxis='y2'
    ))

    fig.update_layout(
        title='Sales and Customers by Category',
        xaxis_title='Category',
        yaxis_title='Sales',
        yaxis2=dict(
            title='Customers',
            overlaying='y',
            side='right'
        ),
        barmode='group',
        height=400
    )

    return fig

def create_scatter_plot(data):
    """Create scatter plot"""
    if data.empty:
        return None

    fig = px.scatter(
        data,
        x='customers',
        y='revenue',
        color='category',
        size='sales',
        title='Customers vs Revenue by Category',
        hover_data=['date']
    )

    fig.update_layout(height=400)
    return fig

def create_pie_chart(data):
    """Create pie chart for category distribution"""
    if data.empty:
        return None

    category_counts = data['category'].value_counts()

    fig = go.Figure(data=[go.Pie(
        labels=category_counts.index,
        values=category_counts.values,
        hole=0.3,
        textinfo='label+percent+value'
    )])

    fig.update_layout(
        title='Sales Distribution by Category',
        height=400
    )

    return fig

def create_heatmap(data):
    """Create correlation heatmap"""
    if data.empty:
        return None

    # Create correlation matrix
    numeric_cols = ['sales', 'customers', 'revenue']
    corr_matrix = data[numeric_cols].corr()

    fig = go.Figure(data=go.Heatmap(
        z=corr_matrix.values,
        x=corr_matrix.columns,
        y=corr_matrix.columns,
        colorscale='RdBu',
        zmid=0,
        text=corr_matrix.values,
        texttemplate="%{text:.2f}",
        textfont={"size": 10}
    ))

    fig.update_layout(
        title='Correlation Matrix',
        height=300
    )

    return fig

def create_realtime_chart(history_data):
    """Create real-time updating chart"""
    if not history_data:
        return go.Figure()

    df = pd.DataFrame(history_data)

    fig = go.Figure()

    # Add main value line
    fig.add_trace(go.Scatter(
        x=df['timestamp'],
        y=df['value'],
        mode='lines+markers',
        name='Value',
        line=dict(color='blue', width=2)
    ))

    # Add moving average
    if len(df) > 5:
        df['moving_avg'] = df['value'].rolling(window=5).mean()
        fig.add_trace(go.Scatter(
            x=df['timestamp'],
            y=df['moving_avg'],
            mode='lines',
            name='Moving Avg',
            line=dict(color='red', width=2, dash='dash')
        ))

    fig.update_layout(
        title='Real-time Data Stream',
        xaxis_title='Time',
        yaxis_title='Value',
        height=300,
        showlegend=True
    )

    return fig

# 3. Dashboard Functions
def update_dashboard(date_range, category_filter):
    """Update all dashboard charts"""
    # Generate or filter data
    if date_range == "Last 7 days":
        days = 7
    elif date_range == "Last 30 days":
        days = 30
    else:
        days = 90

    data = generate_sales_data(days)

    # Apply category filter
    if category_filter != "All":
        data = data[data['category'] == category_filter]

    # Create charts
    line_chart = create_line_chart(data)
    bar_chart = create_bar_chart(data)
    scatter_plot = create_scatter_plot(data)
    pie_chart = create_pie_chart(data)
    heatmap = create_heatmap(data)

    # Calculate statistics
    stats = calculate_statistics(data)

    # Also return the filtered data so callers can reuse the same dataset
    return line_chart, bar_chart, scatter_plot, pie_chart, heatmap, stats, data

def calculate_statistics(data):
    """Calculate dashboard statistics"""
    if data.empty:
        return "No data available"

    stats = {
        'Total Sales': f"${data['sales'].sum():,.2f}",
        'Avg Daily Sales': f"${data['sales'].mean():,.2f}",
        'Total Customers': f"{data['customers'].sum():,}",
        'Avg Revenue': f"${data['revenue'].mean():,.2f}",
        'Best Category': data.groupby('category')['sales'].sum().idxmax(),
        'Peak Day': data.loc[data['sales'].idxmax(), 'date'].strftime('%Y-%m-%d')
    }

    stats_text = "📊 Dashboard Statistics\n\n"
    for key, value in stats.items():
        stats_text += f"**{key}:** {value}\n"

    return stats_text

def update_realtime_data(history_data):
    """Update real-time data"""
    new_data = generate_realtime_data()
    history_data.append(new_data)

    # Keep only last 20 points
    if len(history_data) > 20:
        history_data = history_data[-20:]

    chart = create_realtime_chart(history_data)

    return chart, history_data

# 4. Main Dashboard Interface
def create_dashboard():
    """Create the main dashboard interface"""

    # Initialize real-time data
    realtime_data = []

    with gr.Blocks(title="Data Visualization Dashboard", theme=gr.themes.Soft()) as demo:
        gr.Markdown("# 📈 Data Visualization Dashboard")
        gr.Markdown("Interactive dashboard with real-time updates and multiple chart types")

        # Controls
        with gr.Row():
            date_range = gr.Dropdown(
                choices=["Last 7 days", "Last 30 days", "Last 90 days"],
                label="Time Period",
                value="Last 30 days"
            )

            category_filter = gr.Dropdown(
                choices=["All", "Electronics", "Clothing", "Food", "Books"],
                label="Category Filter",
                value="All"
            )

            refresh_btn = gr.Button("Refresh Data", variant="primary")

        # Statistics
        with gr.Row():
            stats_display = gr.Markdown("📊 Loading statistics...")

        # Charts Grid
        with gr.Row():
            with gr.Column():
                line_chart = gr.Plot(label="Sales Trend")
            with gr.Column():
                bar_chart = gr.Plot(label="Category Analysis")

        with gr.Row():
            with gr.Column():
                scatter_plot = gr.Plot(label="Performance Analysis")
            with gr.Column():
                pie_chart = gr.Plot(label="Distribution")

        # Correlation and Real-time
        with gr.Row():
            with gr.Column():
                heatmap = gr.Plot(label="Correlation Matrix")
            with gr.Column():
                realtime_chart = gr.Plot(label="Real-time Data")

                realtime_refresh = gr.Button("Update Real-time", variant="secondary")

        # Data Table
        with gr.Accordion("Raw Data", open=False):
            data_table = gr.Dataframe(
                label="Sales Data",
                datatype=["date", "number", "number", "number", "str"]
            )

        # Export functionality
        with gr.Row():
            export_btn = gr.Button("Export to CSV", variant="secondary")
            export_status = gr.Textbox(label="Export Status", interactive=False)

        # Event handlers
        def refresh_dashboard(date_range, category_filter):
            """Refresh all dashboard components from a single generated dataset"""
            line, bar, scatter, pie, heat, stats, data = update_dashboard(date_range, category_filter)
            return line, bar, scatter, pie, heat, stats, data.head(10)

        def update_realtime():
            """Update real-time chart"""
            nonlocal realtime_data
            chart, realtime_data = update_realtime_data(realtime_data)
            return chart

        def export_data(date_range, category_filter):
            """Export data to CSV"""
            try:
                if date_range == "Last 7 days":
                    days = 7
                elif date_range == "Last 30 days":
                    days = 30
                else:
                    days = 90

                data = generate_sales_data(days)
                if category_filter != "All":
                    data = data[data['category'] == category_filter]

                filename = f"sales_data_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv"
                data.to_csv(filename, index=False)

                return f"Data exported to {filename}"
            except Exception as e:
                return f"Export failed: {str(e)}"

        # Connect events
        refresh_btn.click(
            refresh_dashboard,
            inputs=[date_range, category_filter],
            outputs=[line_chart, bar_chart, scatter_plot, pie_chart, heatmap, stats_display, data_table]
        )

        date_range.change(
            refresh_dashboard,
            inputs=[date_range, category_filter],
            outputs=[line_chart, bar_chart, scatter_plot, pie_chart, heatmap, stats_display, data_table]
        )

        category_filter.change(
            refresh_dashboard,
            inputs=[date_range, category_filter],
            outputs=[line_chart, bar_chart, scatter_plot, pie_chart, heatmap, stats_display, data_table]
        )

        realtime_refresh.click(
            update_realtime,
            outputs=[realtime_chart]
        )
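
        # Optional (sketch): recent Gradio releases (>= 4.40) provide gr.Timer
        # for automatic polling, which could replace the manual refresh button:
        #     timer = gr.Timer(2)  # tick every 2 seconds
        #     timer.tick(update_realtime, outputs=[realtime_chart])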

        export_btn.click(
            export_data,
            inputs=[date_range, category_filter],
            outputs=[export_status]
        )

        # Initial load
        demo.load(
            refresh_dashboard,
            inputs=[date_range, category_filter],
            outputs=[line_chart, bar_chart, scatter_plot, pie_chart, heatmap, stats_display, data_table]
        )

    return demo

# 5. Advanced Analytics Dashboard
def create_analytics_dashboard():
    """Advanced dashboard with ML insights"""

    def predict_trends(data):
        """Simple trend prediction"""
        if data.empty:
            return "No data for prediction"

        # Simple linear regression for trend
        x = np.arange(len(data))
        y = data['sales'].values

        # Calculate trend
        trend = np.polyfit(x, y, 1)
        slope = trend[0]

        if slope > 0:
            return "📈 **Upward Trend**: Sales are increasing over time"
        elif slope < 0:
            return "📉 **Downward Trend**: Sales are decreasing over time"
        else:
            return "➡️ **Stable**: Sales are relatively stable"

    def detect_anomalies(data):
        """Detect anomalies in sales data"""
        if data.empty:
            return []

        # Simple anomaly detection using z-score
        sales_mean = data['sales'].mean()
        sales_std = data['sales'].std()

        anomalies = data[abs(data['sales'] - sales_mean) > 2 * sales_std]

        return [
            f"Anomaly on {row['date'].strftime('%Y-%m-%d')}: Sales ${row['sales']:.2f}"
            for _, row in anomalies.iterrows()
        ]

    with gr.Blocks(title="Advanced Analytics Dashboard") as demo:
        gr.Markdown("# 🔬 Advanced Analytics Dashboard")

        with gr.Tabs():
            with gr.TabItem("Overview"):
                create_dashboard()

            with gr.TabItem("Predictions"):
                gr.Markdown("## Sales Trend Analysis")

                prediction_date_range = gr.Dropdown(
                    choices=["Last 7 days", "Last 30 days", "Last 90 days"],
                    label="Analysis Period",
                    value="Last 30 days"
                )

                predict_btn = gr.Button("Analyze Trends", variant="primary")

                trend_result = gr.Markdown("Click analyze to see predictions...")

                anomaly_output = gr.JSON(label="Anomaly Detection")

                def run_trend_analysis(period):
                    """Analyze one generated dataset for both trend and anomalies"""
                    days = 7 if "7" in period else 30 if "30" in period else 90
                    data = generate_sales_data(days)
                    return predict_trends(data), detect_anomalies(data)

                predict_btn.click(
                    run_trend_analysis,
                    inputs=[prediction_date_range],
                    outputs=[trend_result, anomaly_output]
                )

    return demo

if __name__ == "__main__":
    print("Choose dashboard type:")
    print("1. Standard Dashboard")
    print("2. Advanced Analytics")

    choice = input("Enter choice (1-2): ")

    if choice == "2":
        demo = create_analytics_dashboard()
    else:
        demo = create_dashboard()

    demo.launch(share=True, debug=True)