🎯 Exemplos recomendados
Coleções de exemplos balanceadas de várias categorias para você explorar
Exemplos de Bibliotecas Python ML
Exemplos abrangentes de Machine Learning em Python incluindo scikit-learn, TensorFlow e PyTorch para ciência de dados e desenvolvimento de IA
💻 Fundamentos do Scikit-learn
🟢 simple
⭐⭐
Exemplos básicos de scikit-learn cobrindo classificação, regressão, clustering e pré-processamento de dados
⏱️ 30 min
🏷️ scikit-learn, machine-learning, data-science
Pré-requisitos:
Fundamentos de Python, NumPy, pandas, Matplotlib
# Scikit-learn Fundamentals
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.svm import SVC, SVR
from sklearn.cluster import KMeans
from sklearn.metrics import accuracy_score, classification_report, mean_squared_error, r2_score
from sklearn.feature_extraction.text import TfidfVectorizer
import matplotlib.pyplot as plt
# 1. Classification Example
def classification_example():
    """Train and compare three classifiers on the Iris dataset.

    Returns:
        dict: model name -> test-set accuracy.
    """
    iris = datasets.load_iris()
    features, targets = iris.data, iris.target

    # Hold out 30% of the samples for evaluation.
    X_train, X_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.3, random_state=42
    )

    # Standardize features; fit the scaler on the training split only
    # to avoid leaking test-set statistics.
    scaler = StandardScaler()
    X_train_std = scaler.fit_transform(X_train)
    X_test_std = scaler.transform(X_test)

    candidates = {
        'Random Forest': RandomForestClassifier(n_estimators=100, random_state=42),
        'Logistic Regression': LogisticRegression(random_state=42),
        'SVM': SVC(random_state=42),
    }

    results = {}
    for name, model in candidates.items():
        model.fit(X_train_std, y_train)
        predictions = model.predict(X_test_std)
        accuracy = accuracy_score(y_test, predictions)
        results[name] = accuracy
        print(f"{name} Accuracy: {accuracy:.4f}")
    return results
# 2. Regression Example
def regression_example():
    """Fit three regressors on synthetic linear data and report MSE and R^2.

    Returns:
        dict: model name -> {'MSE': ..., 'R2': ...}.
    """
    # Synthetic linear target with small Gaussian noise; feature 4 is unused.
    np.random.seed(42)
    X = np.random.randn(100, 5)
    y = 2 * X[:, 0] + 3 * X[:, 1] - X[:, 2] + 0.5 * X[:, 3] + np.random.randn(100) * 0.1

    # 80/20 train/test split.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    candidates = {
        'Random Forest': RandomForestRegressor(n_estimators=100, random_state=42),
        'Linear Regression': LinearRegression(),
        'SVR': SVR(kernel='rbf'),
    }

    results = {}
    for name, model in candidates.items():
        model.fit(X_train, y_train)
        predictions = model.predict(X_test)
        mse = mean_squared_error(y_test, predictions)
        r2 = r2_score(y_test, predictions)
        results[name] = {'MSE': mse, 'R2': r2}
        print(f"{name} - MSE: {mse:.4f}, R2: {r2:.4f}")
    return results
# 3. Clustering Example
def clustering_example():
    """Cluster synthetic blob data with K-means and plot the result.

    Returns:
        ndarray: the cluster label assigned to each of the 300 samples.
    """
    # Generate synthetic data: four well-separated Gaussian blobs.
    np.random.seed(42)
    X, _ = datasets.make_blobs(n_samples=300, centers=4, cluster_std=1.0, random_state=42)

    # n_init is pinned explicitly: the default changed to 'auto' in
    # scikit-learn 1.4, so relying on the default makes results (and a
    # FutureWarning on intermediate versions) depend on the installed version.
    kmeans = KMeans(n_clusters=4, random_state=42, n_init=10)
    cluster_labels = kmeans.fit_predict(X)

    # Visualize the clusters and their centroids.
    plt.figure(figsize=(10, 6))
    plt.scatter(X[:, 0], X[:, 1], c=cluster_labels, cmap='viridis', alpha=0.6)
    plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1],
                marker='x', s=300, c='red', label='Centroids')
    plt.title('K-means Clustering')
    plt.legend()
    plt.show()  # NOTE: blocks until the window is closed on interactive backends
    return cluster_labels
# 4. Text Processing Example
def text_classification_example():
    """Train a tiny TF-IDF + logistic-regression sentiment classifier.

    Returns:
        tuple: (fitted LogisticRegression, fitted TfidfVectorizer).
    """
    # Sample text data
    texts = [
        "I love this movie, it's fantastic!",
        "This movie is terrible and boring.",
        "Great acting and amazing storyline.",
        "Worst movie I have ever seen.",
        "Absolutely brilliant and captivating.",
        "Poor acting and weak plot."
    ]
    labels = [1, 0, 1, 0, 1, 0]  # 1: positive, 0: negative

    # Vectorize text into TF-IDF features.
    vectorizer = TfidfVectorizer(max_features=1000)
    X = vectorizer.fit_transform(texts)

    # stratify keeps both classes in train AND test: with only six samples an
    # unstratified split can leave the test set single-class, which makes the
    # classification report degenerate.
    X_train, X_test, y_train, y_test = train_test_split(
        X, labels, test_size=0.3, random_state=42, stratify=labels
    )
    classifier = LogisticRegression(random_state=42)
    classifier.fit(X_train, y_train)

    # Evaluate on the held-out samples.
    y_pred = classifier.predict(X_test)
    print("Text Classification Results:")
    print(classification_report(y_test, y_pred))
    return classifier, vectorizer
# 5. Data Preprocessing Pipeline
def preprocessing_pipeline():
    """Impute missing values and label-encode a categorical column.

    Returns:
        tuple: (feature DataFrame, target Series, fitted LabelEncoder).
    """
    # Toy dataset with missing numeric values and a categorical feature.
    raw = {
        'age': [25, 30, 35, np.nan, 45, 50, 55, 60],
        'income': [50000, 60000, 70000, 80000, 90000, np.nan, 110000, 120000],
        'education': ['High School', 'Bachelor', 'Master', 'PhD', 'Bachelor', 'Master', 'PhD', 'Bachelor'],
        'target': [0, 1, 1, 1, 0, 1, 1, 0]
    }
    df = pd.DataFrame(raw)

    # Impute missing values: mean for age, median for income.
    df['age'] = df['age'].fillna(df['age'].mean())
    df['income'] = df['income'].fillna(df['income'].median())

    # Encode the education column into integer codes.
    le = LabelEncoder()
    df['education_encoded'] = le.fit_transform(df['education'])

    # Assemble the model-ready features and target.
    features = ['age', 'income', 'education_encoded']
    X = df[features]
    y = df['target']

    print("Preprocessed Data:")
    print(df.head())
    return X, y, le
# Run all examples
if __name__ == "__main__":
    # Entry point: run each scikit-learn demo in sequence, keeping the
    # returned objects so they can be inspected interactively (python -i).
    print("=== Scikit-learn Examples ===")
    print("\n1. Classification Results:")
    classification_results = classification_example()
    print("\n2. Regression Results:")
    regression_results = regression_example()
    print("\n3. Clustering Results:")
    # NOTE: opens a matplotlib window; execution pauses on interactive backends.
    cluster_labels = clustering_example()
    print("\n4. Text Classification:")
    classifier, vectorizer = text_classification_example()
    print("\n5. Data Preprocessing:")
    X, y, encoder = preprocessing_pipeline()
💻 Deep Learning com TensorFlow
🟡 intermediate
⭐⭐⭐⭐
Construir redes neurais com TensorFlow para classificação, regressão e desenvolvimento de modelos personalizados
⏱️ 45 min
🏷️ tensorflow, keras, deep-learning, neural-networks
Pré-requisitos:
Fundamentos de Python, NumPy, fundamentos de Machine Learning, Redes Neurais
# TensorFlow Deep Learning Examples
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# 1. Basic Neural Network for Classification
def neural_network_classification():
    """Train a dense network on MNIST digits and print the test accuracy.

    Returns:
        tuple: (trained model, Keras History from fit()).
    """
    # Load MNIST dataset
    (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

    # Flatten the 28x28 images and scale pixel values into [0, 1].
    x_train = x_train.reshape(-1, 28 * 28).astype('float32') / 255.0
    x_test = x_test.reshape(-1, 28 * 28).astype('float32') / 255.0
    # One-hot encode the 10 digit classes.
    y_train = keras.utils.to_categorical(y_train, 10)
    y_test = keras.utils.to_categorical(y_test, 10)

    # Two hidden layers, each followed by dropout for regularization.
    model = keras.Sequential()
    model.add(layers.Dense(512, activation='relu', input_shape=(784,)))
    model.add(layers.Dropout(0.3))
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dropout(0.3))
    model.add(layers.Dense(10, activation='softmax'))

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # 20% of the training data is held out for validation during fit.
    history = model.fit(x_train, y_train,
                        batch_size=128,
                        epochs=10,
                        validation_split=0.2,
                        verbose=1)

    test_loss, test_acc = model.evaluate(x_test, y_test)
    print(f"Test Accuracy: {test_acc:.4f}")
    return model, history
# 2. CNN for Image Classification
def convnet_classification():
    """Train a small CNN on CIFAR-10.

    Returns:
        tuple: (trained model, Keras History from fit()).
    """
    # Load CIFAR-10 dataset
    (x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()

    # Scale pixels into [0, 1] and one-hot encode the 10 classes.
    x_train = x_train.astype('float32') / 255.0
    x_test = x_test.astype('float32') / 255.0
    y_train = keras.utils.to_categorical(y_train, 10)
    y_test = keras.utils.to_categorical(y_test, 10)

    # Three convolution stages followed by a small dense classifier head.
    model = keras.Sequential()
    model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.Flatten())
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(10, activation='softmax'))

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    history = model.fit(x_train, y_train,
                        batch_size=64,
                        epochs=20,
                        validation_split=0.2,
                        verbose=1)
    return model, history
# 3. Regression Neural Network
def neural_network_regression():
    """Fit a small MLP to synthetic linear data and print the test MAE.

    Returns:
        tuple: (trained model, Keras History from fit()).
    """
    # Synthetic linear target with small Gaussian noise.
    np.random.seed(42)
    X = np.random.randn(1000, 10)
    y = 2 * X[:, 0] + 3 * X[:, 1] - X[:, 2] + np.random.randn(1000) * 0.1

    # Split, then standardize features (scaler fitted on the train split only).
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train)
    X_test_scaled = scaler.transform(X_test)

    # Two hidden layers; the single output unit has no activation,
    # which is the standard setup for regression.
    model = keras.Sequential()
    model.add(layers.Dense(64, activation='relu', input_shape=(10,)))
    model.add(layers.Dense(32, activation='relu'))
    model.add(layers.Dense(1))  # Linear activation for regression

    model.compile(optimizer='adam', loss='mse', metrics=['mae'])

    # verbose=0: train silently for 100 epochs.
    history = model.fit(X_train_scaled, y_train,
                        batch_size=32,
                        epochs=100,
                        validation_split=0.2,
                        verbose=0)

    test_loss, test_mae = model.evaluate(X_test_scaled, y_test)
    print(f"Test MAE: {test_mae:.4f}")
    return model, history
# 4. Custom Model Class
class CustomModel(keras.Model):
    """Small two-hidden-layer MLP built via Keras Model subclassing."""

    def __init__(self, units=32, **kwargs):
        super().__init__(**kwargs)
        # Layers are created as attributes so Keras tracks their weights.
        self.dense1 = layers.Dense(units, activation='relu')
        self.dense2 = layers.Dense(units, activation='relu')
        self.dropout = layers.Dropout(0.3)
        self.output_layer = layers.Dense(1)

    def call(self, inputs, training=False):
        # Dropout is applied between the two hidden layers and is only
        # active when training=True.
        hidden = self.dropout(self.dense1(inputs), training=training)
        return self.output_layer(self.dense2(hidden))
# 5. Transfer Learning Example
def transfer_learning():
    """Build a MobileNetV2-based classifier with a frozen backbone.

    Returns:
        keras.Sequential: the compiled (untrained) model.
    """
    # Pre-trained MobileNetV2 without its ImageNet classification head.
    base_model = keras.applications.MobileNetV2(input_shape=(224, 224, 3),
                                                include_top=False,
                                                weights='imagenet')
    base_model.trainable = False  # freeze the pre-trained weights

    # Stack a small trainable classification head on top of the backbone.
    model = keras.Sequential()
    model.add(base_model)
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dropout(0.2))
    model.add(layers.Dense(10, activation='softmax'))  # Assuming 10 classes

    model.compile(optimizer=keras.optimizers.Adam(1e-3),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
# 6. Callbacks and Model Saving
def callbacks_example():
    """Return a standard callback set: early stopping, LR decay, checkpointing."""
    # Stop when validation loss stalls for 5 epochs; restore the best weights.
    early_stop = keras.callbacks.EarlyStopping(patience=5, restore_best_weights=True)
    # Halve the learning rate after 3 epochs without improvement.
    lr_decay = keras.callbacks.ReduceLROnPlateau(factor=0.5, patience=3)
    # Keep only the checkpoint with the lowest validation loss.
    checkpoint = keras.callbacks.ModelCheckpoint('best_model.h5',
                                                 save_best_only=True,
                                                 monitor='val_loss')
    return [early_stop, lr_decay, checkpoint]
# 7. Data Augmentation
def data_augmentation_example():
    """Build a toy CNN whose first stage randomly augments input images.

    Returns:
        keras.Sequential: the (uncompiled) model with built-in augmentation.
    """
    # Random flip/rotation/zoom/contrast preprocessing layers.
    data_augmentation = keras.Sequential()
    data_augmentation.add(layers.RandomFlip("horizontal"))
    data_augmentation.add(layers.RandomRotation(0.1))
    data_augmentation.add(layers.RandomZoom(0.1))
    data_augmentation.add(layers.RandomContrast(0.1))

    # Minimal model: augmentation stage followed by one conv block.
    model = keras.Sequential()
    model.add(data_augmentation)
    model.add(layers.Conv2D(32, 3, activation='relu'))
    model.add(layers.MaxPooling2D())
    model.add(layers.Flatten())
    model.add(layers.Dense(10, activation='softmax'))
    return model
# Run examples
if __name__ == "__main__":
    # Entry point: run every TensorFlow demo in order. The first two train
    # on full MNIST/CIFAR-10, so this script takes a while end to end.
    print("=== TensorFlow Examples ===")
    print("\n1. Neural Network Classification:")
    nn_model, nn_history = neural_network_classification()
    print("\n2. CNN Classification:")
    cnn_model, cnn_history = convnet_classification()
    print("\n3. Neural Network Regression:")
    reg_model, reg_history = neural_network_regression()
    print("\n4. Transfer Learning Model:")
    transfer_model = transfer_learning()
    print("\n5. Data Augmentation Model:")
    aug_model = data_augmentation_example()
💻 Fundamentos do PyTorch
🟡 intermediate
⭐⭐⭐⭐
Desenvolvimento de redes neurais dinâmicas com PyTorch para pesquisa e aplicações de machine learning em produção
⏱️ 40 min
🏷️ pytorch, deep-learning, neural-networks
Pré-requisitos:
Fundamentos de Python, fundamentos de Machine Learning, Redes Neurais
# PyTorch Deep Learning Examples
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset, random_split
import torchvision
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt
# 1. Basic Tensors and Operations
def tensor_operations():
    """Demonstrate tensor creation, element-wise ops, matmul and autograd."""
    print("=== PyTorch Tensor Operations ===")
    # Two random 3x4 tensors.
    x = torch.randn(3, 4)
    y = torch.randn(3, 4)

    # Each assignment overwrites z; only the element-wise product survives.
    z = x + y
    z = torch.add(x, y)
    z = x * y  # Element-wise multiplication
    print("Tensor x:\n", x)
    print("Tensor y:\n", y)
    print("Element-wise multiplication:\n", z)

    # Matrix multiplication: (4x3) @ (3x5) -> (4x5).
    lhs = torch.randn(4, 3)
    rhs = torch.randn(3, 5)
    prod = torch.mm(lhs, rhs)  # or torch.matmul(lhs, rhs)
    print("Matrix multiplication (4x3 @ 3x5 = 4x5):\n", prod.shape)

    # Autograd: d/dx (x^2 + 3x + 1) = 2x + 3, which evaluates to 7 at x = 2.
    x = torch.tensor(2.0, requires_grad=True)
    y = x**2 + 3*x + 1
    y.backward()
    print(f"Gradient of y=x^2+3x+1 at x=2: {x.grad}")
# 2. Simple Neural Network
class SimpleNN(nn.Module):
    """Minimal two-layer fully connected classifier.

    Attribute names (fc1/relu/fc2) are kept stable because they define the
    state_dict keys used elsewhere for saving/loading.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Raw logits are returned; CrossEntropyLoss applies softmax itself.
        return self.fc2(self.relu(self.fc1(x)))
def simple_nn_example():
    """Train SimpleNN on random data for 20 epochs and report test accuracy."""
    print("\n=== Simple Neural Network ===")
    # Reproducible synthetic data: 100 samples, 10 features, 3 classes.
    torch.manual_seed(42)
    features = torch.randn(100, 10)
    targets = torch.randint(0, 3, (100,))  # 3 classes

    # 80/20 random split wrapped in DataLoaders.
    dataset = TensorDataset(features, targets)
    train_size = int(0.8 * len(dataset))
    test_size = len(dataset) - train_size
    train_dataset, test_dataset = random_split(dataset, [train_size, test_size])
    train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=16)

    model = SimpleNN(input_size=10, hidden_size=32, output_size=3)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Standard training loop; log the last batch loss every 5 epochs.
    epochs = 20
    for epoch in range(epochs):
        model.train()
        for xb, yb in train_loader:
            optimizer.zero_grad()
            loss = criterion(model(xb), yb)
            loss.backward()
            optimizer.step()
        if (epoch + 1) % 5 == 0:
            print(f'Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}')

    # Evaluate accuracy on the held-out split without tracking gradients.
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for xb, yb in test_loader:
            outputs = model(xb)
            _, predicted = torch.max(outputs.data, 1)
            total += yb.size(0)
            correct += (predicted == yb).sum().item()
    print(f'Test Accuracy: {100 * correct / total:.2f}%')
# 3. CNN for Image Classification
class SimpleCNN(nn.Module):
    """Two-conv-layer CNN for 28x28 single-channel images (e.g. MNIST)."""

    def __init__(self, num_classes=10):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.fc1 = nn.Linear(64 * 7 * 7, 128)
        self.fc2 = nn.Linear(128, num_classes)
        self.dropout = nn.Dropout(0.25)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Each conv + pool stage halves the spatial size: 28 -> 14 -> 7.
        x = self.pool(self.relu(self.conv1(x)))
        x = self.pool(self.relu(self.conv2(x)))
        x = x.view(-1, 64 * 7 * 7)  # flatten to (batch, 3136)
        x = self.dropout(self.relu(self.fc1(x)))
        return self.fc2(x)
def cnn_mnist_example():
    """Train SimpleCNN on MNIST (downloads the dataset on first run)."""
    print("\n=== CNN for MNIST Classification ===")
    # Convert to tensors and normalize with the given MNIST mean/std.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    train_dataset = torchvision.datasets.MNIST(
        root='./data', train=True, download=True, transform=transform
    )
    test_dataset = torchvision.datasets.MNIST(
        root='./data', train=False, download=True, transform=transform
    )
    train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=64)

    # Use the GPU when one is available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = SimpleCNN().to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Training: log the mean batch loss once per epoch.
    epochs = 5
    for epoch in range(epochs):
        model.train()
        running_loss = 0.0
        for images, labels in train_loader:
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            loss = criterion(model(images), labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        print(f'Epoch {epoch+1}, Loss: {running_loss/len(train_loader):.4f}')

    # Evaluation on the test split.
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print(f'Test Accuracy: {100 * correct / total:.2f}%')
# 4. Custom Dataset Class
from torch.utils.data import Dataset
class CustomDataset(Dataset):
    """Map-style dataset over parallel data/target sequences.

    An optional transform is applied to the sample (not the label) on access.
    """

    def __init__(self, data, targets, transform=None):
        self.data = data
        self.targets = targets
        self.transform = transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        sample, label = self.data[idx], self.targets[idx]
        if self.transform:
            sample = self.transform(sample)
        return sample, label
# 5. Transfer Learning with Pre-trained Models
def transfer_learning_example():
    """Build a frozen ResNet-18 feature extractor with a new 10-class head.

    Only the replaced final fully connected layer remains trainable.
    """
    print("\n=== Transfer Learning Example ===")
    # Load pre-trained ResNet
    # NOTE(review): `pretrained=True` is deprecated in newer torchvision in
    # favor of the `weights=` argument — confirm against the installed version.
    model = torchvision.models.resnet18(pretrained=True)
    # Freeze all layers so backprop cannot update the pre-trained weights.
    for param in model.parameters():
        param.requires_grad = False
    # Replace final layer with a fresh 10-way classifier head.
    num_features = model.fc.in_features
    model.fc = nn.Linear(num_features, 10)  # 10 classes for CIFAR-10
    # Only train the final layer (a new nn.Linear already has
    # requires_grad=True; this loop makes the intent explicit).
    for param in model.fc.parameters():
        param.requires_grad = True
    print("Transfer learning model created successfully")
    return model
# 6. GPU Usage
def gpu_example():
    """Pick the best available device and run a 1000x1000 matmul on it."""
    print("\n=== GPU Usage Example ===")
    # Prefer CUDA when it is available; otherwise fall back to the CPU.
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        device = torch.device('cuda')
        print(f"GPU available: {torch.cuda.get_device_name()}")
    else:
        device = torch.device('cpu')
        print("Using CPU")
    # Operands must live on the same device as the computation.
    x = torch.randn(1000, 1000).to(device)
    y = torch.randn(1000, 1000).to(device)
    z = torch.mm(x, y)
    print(f"Matrix multiplication completed on {device}")
# 7. Model Saving and Loading
def save_load_model_example():
    """Demonstrate persisting a model via state_dict and via whole-object pickle.

    Side effect: writes 'model.pth' and 'entire_model.pth' into the
    current working directory.
    """
    print("\n=== Model Saving and Loading ===")
    # Create and train a simple model
    model = SimpleNN(10, 32, 3)
    # Save only the parameters (robust to later code refactors).
    torch.save(model.state_dict(), 'model.pth')
    print("Model saved successfully")
    # Load model: rebuild the same architecture, then restore the weights.
    loaded_model = SimpleNN(10, 32, 3)
    loaded_model.load_state_dict(torch.load('model.pth'))
    print("Model loaded successfully")
    # Save entire model (pickles the class itself; fragile across code changes).
    # NOTE(review): torch>=2.6 defaults torch.load(weights_only=True), which
    # refuses to unpickle arbitrary classes — this load may need
    # weights_only=False there; confirm against the installed torch version.
    torch.save(model, 'entire_model.pth')
    loaded_entire_model = torch.load('entire_model.pth')
    print("Entire model saved and loaded successfully")
# Run all examples
if __name__ == "__main__":
    # Entry point: run every PyTorch demo in order. cnn_mnist_example
    # downloads MNIST into ./data on first run and trains for 5 epochs,
    # so it dominates this script's runtime.
    tensor_operations()
    simple_nn_example()
    cnn_mnist_example()
    transfer_learning_example()
    gpu_example()
    save_load_model_example()