🎯 Exemplos recomendados
Balanced sample collections from various categories for you to explore
Exemplos Linode Cloud Service
Exemplos da Linode incluindo instâncias, Object Storage, Load Balancers e Kubernetes
💻 Gerenciamento de Instâncias Linode python
🟢 simple
⭐⭐
Criar, configurar e gerenciar instâncias Linode usando API v4
⏱️ 20 min
🏷️ linode, api, instances, infrastructure
Prerequisites:
Linode account, API token, Python knowledge
# Linode Instance Management
# Python - linode_manager.py + requirements.txt
import os
import time
import requests
import json
from datetime import datetime
class LinodeManager:
    """Manage Linode instances through the Linode API v4.

    Response shapes differ by endpoint (see the Linode API v4 reference):
      * Collection endpoints (GET linode/instances, regions, linode/types,
        images) return a paginated envelope:
        ``{"data": [...], "page": ..., "pages": ..., "results": ...}``.
      * Single-resource endpoints (POST linode/instances,
        GET linode/instances/{id}, POST .../backups) return the object
        itself — it is NOT wrapped in a "data" key.
      * Action endpoints (boot/shutdown/reboot/resize) return an empty body.
    """

    def __init__(self, personal_access_token):
        """Store the token and prepare the default request headers.

        Args:
            personal_access_token: A Linode Personal Access Token with
                permission to manage Linodes.
        """
        self.token = personal_access_token
        self.base_url = "https://api.linode.com/v4"
        # X-Filter is only sent when a list call actually filters; an empty
        # filter header serves no purpose, so it is not part of the defaults.
        self.headers = {
            "Authorization": f"Bearer {self.token}",
            "Content-Type": "application/json",
        }

    def _make_request(self, method, endpoint, data=None, params=None, filters=None):
        """Send an HTTP request to the Linode API and return the parsed JSON.

        Args:
            method: HTTP verb ("GET", "POST", "DELETE", ...).
            endpoint: Path relative to the API root, e.g. "linode/instances".
            data: Optional JSON-serializable request body.
            params: Optional query parameters (e.g. pagination).
            filters: Optional dict serialized into the X-Filter header —
                the Linode convention for filtering list endpoints.

        Returns:
            The decoded JSON body, or {} when the response body is empty
            (DELETE and instance-action endpoints return no content).

        Raises:
            requests.exceptions.HTTPError: On any 4xx/5xx response.
        """
        headers = dict(self.headers)
        if filters:
            headers["X-Filter"] = json.dumps(filters)
        url = f"{self.base_url}/{endpoint}"
        response = requests.request(method, url, headers=headers, json=data, params=params)
        response.raise_for_status()
        return response.json() if response.content else {}

    @staticmethod
    def _print_api_error(context, http_error):
        """Print the first error reason from a Linode API error response."""
        try:
            reason = http_error.response.json()["errors"][0]["reason"]
        except (ValueError, KeyError, IndexError):
            # Fall back to the raw HTTP error when the body is not the
            # standard {"errors": [{"reason": ...}]} envelope.
            reason = str(http_error)
        print(f"❌ Error {context}: {reason}")

    def create_linode(self, label, region, type, image, root_pass=None, ssh_keys=None, tags=None):
        """Create a new Linode instance.

        The parameter name ``type`` mirrors the API field and is kept for
        backward compatibility even though it shadows the builtin.

        Returns:
            The created Linode object (dict), or None on API error.
        """
        data = {
            "label": label,
            "region": region,
            "type": type,
            "image": image,
            # Never create an instance without a root password.
            "root_pass": root_pass or self._generate_password(),
            "authorized_keys": ssh_keys or [],
            "tags": tags or [],
            "private_ip": True,
            "backups_enabled": False,
        }
        try:
            # POST linode/instances returns the Linode object directly
            # (no "data" wrapper).
            linode = self._make_request("POST", "linode/instances", data)
            print(f"✅ Linode '{label}' created successfully!")
            print(f"   ID: {linode['id']}")
            print(f"   Status: {linode['status']}")
            print(f"   IPv4: {linode['ipv4']}")
            print(f"   IPv6: {linode['ipv6']}")
            print(f"   Region: {linode['region']}")
            print(f"   Type: {linode['type']}")
            print(f"   Image: {linode['image']}")
            if linode.get('tags'):
                print(f"   Tags: {', '.join(linode['tags'])}")
            return linode
        except requests.exceptions.HTTPError as e:
            self._print_api_error("creating Linode", e)
            return None

    def list_linodes(self, tags=None):
        """List Linode instances, optionally filtered by tag.

        Args:
            tags: Optional tag value; list filtering uses the X-Filter
                header (not query parameters) per the Linode API docs.

        Returns:
            A list of Linode objects (possibly empty).
        """
        filters = {"tags": tags} if tags else None
        try:
            result = self._make_request("GET", "linode/instances", filters=filters)
            linodes = result["data"]
            print(f"📋 Found {len(linodes)} Linode instance(s):")
            print("-" * 70)
            for linode in linodes:
                status_emoji = {
                    "running": "🟢",
                    "offline": "🔴",
                    "booting": "🟡",
                    "rebooting": "🔄",
                    "shutting_down": "🟠",
                }.get(linode["status"], "⚪")
                print(f"{status_emoji} {linode['label']} (ID: {linode['id']})")
                print(f"   Status: {linode['status']}")
                print(f"   IPv4: {', '.join(linode['ipv4'])}")
                print(f"   Region: {linode['region']}")
                print(f"   Type: {linode['type']}")
                print(f"   Image: {linode['image']}")
                print(f"   Created: {linode['created']}")
                if linode.get('tags'):
                    print(f"   Tags: {', '.join(linode['tags'])}")
                print()
            return linodes
        except requests.exceptions.HTTPError as e:
            self._print_api_error("listing Linodes", e)
            return []

    def get_linode_details(self, linode_id):
        """Print and return details for a single Linode, or None on error."""
        try:
            # GET linode/instances/{id} returns the object directly.
            linode = self._make_request("GET", f"linode/instances/{linode_id}")
            print(f"ℹ️ Linode Details for '{linode['label']}':")
            print("-" * 60)
            print(f"ID: {linode['id']}")
            print(f"Label: {linode['label']}")
            print(f"Status: {linode['status']}")
            print(f"Group: {linode.get('group', 'None')}")
            print(f"Region: {linode['region']}")
            print(f"Type: {linode['type']}")
            print(f"Image: {linode['image']}")
            print(f"Specs: {linode['specs']['disk']}GB disk, {linode['specs']['memory']}MB RAM, {linode['specs']['vcpus']} CPU")
            print(f"Transfer: {linode['specs']['transfer']}GB/month")
            print(f"IPv4: {', '.join(linode['ipv4'])}")
            print(f"IPv6: {linode['ipv6']}")
            print(f"Private IPv4: {linode['ipv4_private']}")
            print(f"Created: {linode['created']}")
            print(f"Updated: {linode['updated']}")
            if linode.get('tags'):
                print(f"Tags: {', '.join(linode['tags'])}")
            return linode
        except requests.exceptions.HTTPError as e:
            self._print_api_error("getting Linode details", e)
            return None

    def _instance_action(self, linode_id, action, message, error_context):
        """POST an instance action endpoint.

        These endpoints return an empty body on success, so there is no
        action object to report — only success/failure.

        Returns:
            True on success, None on API error.
        """
        try:
            self._make_request("POST", f"linode/instances/{linode_id}/{action}")
            print(f"{message} for Linode {linode_id}")
            return True
        except requests.exceptions.HTTPError as e:
            self._print_api_error(error_context, e)
            return None

    def power_on_linode(self, linode_id):
        """Power on (boot) a Linode. Returns True on success, None on error."""
        return self._instance_action(linode_id, "boot", "🔌 Power on initiated", "powering on Linode")

    def power_off_linode(self, linode_id):
        """Power off (shut down) a Linode. Returns True on success, None on error."""
        return self._instance_action(linode_id, "shutdown", "🔌 Power off initiated", "powering off Linode")

    def reboot_linode(self, linode_id):
        """Reboot a Linode. Returns True on success, None on error."""
        return self._instance_action(linode_id, "reboot", "🔄 Reboot initiated", "rebooting Linode")

    def delete_linode(self, linode_id):
        """Delete a Linode after interactive confirmation (PERMANENT!).

        Returns:
            True when deleted, False when cancelled or on API error.
        """
        # Show what is about to be destroyed before asking for confirmation.
        linode = self._make_request("GET", f"linode/instances/{linode_id}")
        print(f"⚠️ WARNING: You are about to delete Linode:")
        print(f"   Label: {linode['label']}")
        print(f"   ID: {linode['id']}")
        print(f"   Status: {linode['status']}")
        print(f"   IPv4: {', '.join(linode['ipv4'])}")
        confirm = input("\nAre you sure you want to delete this Linode? This cannot be undone! (yes/no): ")
        if confirm.lower() != 'yes':
            print("❌ Deletion cancelled")
            return False
        try:
            self._make_request("DELETE", f"linode/instances/{linode_id}")
            print(f"✅ Linode {linode_id} deleted successfully")
            return True
        except requests.exceptions.HTTPError as e:
            self._print_api_error("deleting Linode", e)
            return False

    def resize_linode(self, linode_id, new_type):
        """Resize a Linode to a different plan type.

        The resize endpoint returns an empty body; success means the resize
        was queued, not that it has completed.

        Returns:
            True on success, None on API error.
        """
        try:
            self._make_request("POST", f"linode/instances/{linode_id}/resize", {"type": new_type})
            print(f"📏 Resize initiated for Linode {linode_id}")
            print(f"   New type: {new_type}")
            return True
        except requests.exceptions.HTTPError as e:
            self._print_api_error("resizing Linode", e)
            return None

    def create_snapshot(self, linode_id, label):
        """Create a snapshot backup of a Linode.

        Returns:
            The Backup object (dict), or None on API error.
        """
        try:
            # POST .../backups returns the Backup object directly.
            backup = self._make_request("POST", f"linode/instances/{linode_id}/backups", {"label": label})
            print(f"📸 Snapshot '{label}' creation initiated")
            print(f"   Backup ID: {backup['id']}")
            print(f"   Status: {backup['status']}")
            return backup
        except requests.exceptions.HTTPError as e:
            self._print_api_error("creating snapshot", e)
            return None

    def list_regions(self):
        """Print and return the available Linode regions."""
        try:
            result = self._make_request("GET", "regions")
            regions = result["data"]
            print(f"🌍 Available Linode Regions:")
            print("-" * 40)
            for region in regions:
                print(f"{region['id']:6} - {region['label']} ({region['country']})")
                if region.get('capabilities'):
                    # Only show the first three capabilities to keep output short.
                    caps = ', '.join(region['capabilities'][:3])
                    print(f"       Capabilities: {caps}{'...' if len(region['capabilities']) > 3 else ''}")
                print()
            return regions
        except requests.exceptions.HTTPError as e:
            self._print_api_error("listing regions", e)
            return []

    def list_linode_types(self):
        """Print and return the available Linode plan types."""
        try:
            result = self._make_request("GET", "linode/types")
            types = result["data"]
            print(f"💻 Available Linode Types:")
            print("-" * 70)
            for linode_type in types:
                # "price" is an object {"hourly": ..., "monthly": ...},
                # not a list of labelled entries.
                price_monthly = (linode_type.get('price') or {}).get('monthly', 0)
                print(f"{linode_type['id']:20} - ${price_monthly:.2f}/month")
                print(f"{'':20}   {linode_type['label']}")
                print(f"{'':20}   {linode_type['vcpus']} CPU, {linode_type['memory']}MB RAM, {linode_type['disk']}GB disk")
                print(f"{'':20}   Transfer: {linode_type['transfer']}GB/month")
                print()
            return types
        except requests.exceptions.HTTPError as e:
            self._print_api_error("listing Linode types", e)
            return []

    def list_available_images(self):
        """Print the first 20 non-deprecated images; return the full list."""
        try:
            result = self._make_request("GET", "images")
            images = result["data"]
            print(f"🖼️ Available Linode Images:")
            print("-" * 70)
            # Filter out deprecated and unavailable images.
            available_images = [img for img in images
                                if img.get('status') == 'available' and not img.get('deprecated')]
            for image in available_images[:20]:
                print(f"{image['id']:30} - {image['label']}")
                print(f"{'':30}   Vendor: {image.get('vendor', 'Unknown')}")
                print(f"{'':30}   Size: {image.get('size', 'Unknown')}MB")
                print()
            if len(available_images) > 20:
                print(f"... and {len(available_images) - 20} more images")
            return images
        except requests.exceptions.HTTPError as e:
            self._print_api_error("listing images", e)
            return []

    def get_action_status(self, action_id):
        """Check the status of an asynchronous operation.

        Linode calls these "events" (GET account/events/{id}); the
        ``action_id`` parameter name is kept for callers' compatibility.

        Returns:
            The Event object (dict), or None on API error.
        """
        try:
            event = self._make_request("GET", f"account/events/{action_id}")
            status_emoji = {
                "scheduled": "⏳",
                "started": "🔄",
                "finished": "✅",
                "failed": "❌",
                "notification": "📢",
            }.get(event['status'], "⚪")
            print(f"{status_emoji} Action {action_id} Status: {event['status']}")
            print(f"   Type: {event['action']}")
            print(f"   Created: {event['created']}")
            print(f"   Percent Complete: {event.get('percent_complete', 0)}%")
            if event.get('entity'):
                print(f"   Entity: {event['entity']['label']} (ID: {event['entity']['id']})")
            return event
        except requests.exceptions.HTTPError as e:
            self._print_api_error("getting action status", e)
            return None

    def _generate_password(self, length=16):
        """Generate a cryptographically secure random password.

        Uses secrets rather than random: this value becomes a root
        password, so it must come from a CSPRNG.
        """
        import secrets
        import string
        chars = string.ascii_letters + string.digits + "!@#$%^&*"
        return ''.join(secrets.choice(chars) for _ in range(length))
# Example usage
if __name__ == "__main__":
    # Credentials come from the environment — never hard-code tokens.
    token = os.environ.get('LINODE_API_TOKEN')
    if not token:
        print("❌ Please set LINODE_API_TOKEN environment variable")
        exit(1)

    manager = LinodeManager(token)

    try:
        print("🚀 Linode Management Tool")
        print("=" * 50)

        # Walk through the read-only discovery calls in order.
        discovery_steps = (
            ("\n1. Available Regions:", manager.list_regions),
            ("\n2. Available Linode Types:", manager.list_linode_types),
            ("\n3. Available Images:", manager.list_available_images),
        )
        for heading, step in discovery_steps:
            print(heading)
            step()

        # Example: provisioning a Linode (uncomment to actually create one).
        # print("\n4. Creating a new Linode...")
        # linode = manager.create_linode(
        #     label="my-test-server",
        #     region="us-east",              # Newark
        #     type="g6-nanode-1",            # smallest plan
        #     image="linode/ubuntu22.04",    # Ubuntu 22.04
        #     tags=["test", "automation"],
        # )

        print("\n5. Current Linodes:")
        manager.list_linodes()
    except KeyboardInterrupt:
        print("\n\n👋 Goodbye!")
    except Exception as e:
        print(f"\n❌ Unexpected error: {e}")
# requirements.txt
"""
requests>=2.31.0
python-dotenv>=1.0.0
"""
💻 Integração com Object Storage python
🟡 intermediate
⭐⭐⭐
Gerenciar arquivos e objetos no Linode Object Storage com Python e AWS S3 SDK
⏱️ 30 min
🏷️ linode, object storage, s3, files, buckets
Prerequisites:
Linode account, Object Storage access keys, Python knowledge
# Linode Object Storage Manager
# Python - object_storage_manager.py + requirements.txt
import argparse
import json
import mimetypes
import os
from datetime import datetime, timedelta, timezone

from boto3 import Session
from botocore.client import Config
class LinodeObjectStorage:
    """Manage buckets and objects on Linode Object Storage.

    Linode Object Storage is S3-compatible, so every operation goes
    through a boto3 S3 client pointed at the regional
    ``<region>.linodeobjects.com`` endpoint.
    """

    def __init__(self, access_key, secret_key, region='us-east-1'):
        """Create an S3 client for the given credentials and region."""
        self.session = Session()
        self.client = self.session.client(
            's3',
            endpoint_url=f'https://{region}.linodeobjects.com',
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key,
            config=Config(signature_version='s3v4'),
            region_name=region,
        )
        self.region = region

    def create_bucket(self, bucket_name, private=False):
        """Create a new bucket.

        Args:
            bucket_name: Globally unique bucket name.
            private: When True the bucket ACL is explicitly 'private'.
                Note: the S3 default ACL is private as well; objects must
                be uploaded with a public ACL to be publicly readable.

        Returns:
            True on success, False on error.
        """
        try:
            if private:
                self.client.create_bucket(Bucket=bucket_name, ACL='private')
            else:
                self.client.create_bucket(Bucket=bucket_name)
            print(f"✅ Bucket '{bucket_name}' created successfully")
            print(f"   Endpoint: https://{bucket_name}.{self.region}.linodeobjects.com")
            print(f"   Access: {'Private' if private else 'Public'}")
            return True
        except Exception as e:
            print(f"❌ Error creating bucket: {str(e)}")
            return False

    def list_buckets(self):
        """Print and return all buckets owned by these credentials."""
        try:
            response = self.client.list_buckets()
            buckets = response['Buckets']
            print(f"📦 Found {len(buckets)} bucket(s):")
            print("-" * 70)
            for bucket in buckets:
                print(f"📁 {bucket['Name']}")
                print(f"   Created: {bucket['CreationDate'].strftime('%Y-%m-%d %H:%M:%S')}")
                print(f"   URL: https://{bucket['Name']}.{self.region}.linodeobjects.com")
                print()
            return buckets
        except Exception as e:
            print(f"❌ Error listing buckets: {str(e)}")
            return []

    def delete_bucket(self, bucket_name, force=False):
        """Delete a bucket (must be empty unless force=True).

        Returns:
            True on success, False on error.
        """
        if force:
            # S3 refuses to delete non-empty buckets, so empty it first.
            print(f"🗑️ Emptying bucket '{bucket_name}'...")
            self._empty_bucket(bucket_name)
        try:
            self.client.delete_bucket(Bucket=bucket_name)
            print(f"✅ Bucket '{bucket_name}' deleted successfully")
            return True
        except Exception as e:
            print(f"❌ Error deleting bucket: {str(e)}")
            return False

    def _empty_bucket(self, bucket_name):
        """Delete every object (and version) in a bucket.

        Uses paginators so buckets with more than 1000 keys are fully
        emptied; each delete_objects call stays within the API's
        1000-key limit because a page holds at most 1000 entries.
        """
        try:
            deleted = 0
            paginator = self.client.get_paginator('list_objects_v2')
            for page in paginator.paginate(Bucket=bucket_name):
                contents = page.get('Contents', [])
                if contents:
                    delete_keys = [{'Key': obj['Key']} for obj in contents]
                    self.client.delete_objects(Bucket=bucket_name, Delete={'Objects': delete_keys})
                    deleted += len(delete_keys)
            if deleted:
                print(f"   Deleted {deleted} objects")
            # Remove versions and delete markers left behind on
            # versioning-enabled buckets.
            version_paginator = self.client.get_paginator('list_object_versions')
            versioned = 0
            for page in version_paginator.paginate(Bucket=bucket_name):
                versions = page.get('Versions', []) + page.get('DeleteMarkers', [])
                if versions:
                    delete_keys = [{'Key': v['Key'], 'VersionId': v.get('VersionId')} for v in versions]
                    self.client.delete_objects(Bucket=bucket_name, Delete={'Objects': delete_keys})
                    versioned += len(delete_keys)
            if versioned:
                print(f"   Deleted {versioned} versioned objects")
        except Exception as e:
            print(f"❌ Error emptying bucket: {str(e)}")

    def upload_file(self, file_path, bucket_name, object_key=None, public=True, metadata=None):
        """Upload a local file to Object Storage.

        Args:
            file_path: Path to the local file.
            bucket_name: Destination bucket.
            object_key: Destination key; defaults to the file's basename.
            public: When True the object gets a 'public-read' ACL.
            metadata: Optional dict stored as object metadata.

        Returns:
            A dict describing the uploaded object, or None on error.
        """
        if not os.path.exists(file_path):
            print(f"❌ File not found: {file_path}")
            return None
        if not object_key:
            object_key = os.path.basename(file_path)
        try:
            # Guess the content type so browsers render the object correctly.
            content_type, _ = mimetypes.guess_type(file_path)
            if not content_type:
                content_type = 'application/octet-stream'
            extra_args = {
                'ContentType': content_type,
                'Metadata': metadata or {},
            }
            if public:
                extra_args['ACL'] = 'public-read'
            print(f"📤 Uploading '{file_path}' to '{bucket_name}/{object_key}'...")
            self.client.upload_file(file_path, bucket_name, object_key, ExtraArgs=extra_args)
            file_url = f"https://{bucket_name}.{self.region}.linodeobjects.com/{object_key}"
            print(f"✅ File uploaded successfully!")
            print(f"   URL: {file_url}")
            print(f"   Size: {os.path.getsize(file_path)} bytes")
            print(f"   Type: {content_type}")
            return {
                'bucket': bucket_name,
                'key': object_key,
                'url': file_url,
                'size': os.path.getsize(file_path),
                'type': content_type,
            }
        except Exception as e:
            print(f"❌ Error uploading file: {str(e)}")
            return None

    def download_file(self, bucket_name, object_key, local_path=None):
        """Download an object to a local file.

        Returns:
            The local path on success, or None on error.
        """
        try:
            if not local_path:
                local_path = os.path.basename(object_key)
            # Only create directories when the target path actually has a
            # directory component — os.makedirs('') raises.
            target_dir = os.path.dirname(local_path)
            if target_dir:
                os.makedirs(target_dir, exist_ok=True)
            print(f"📥 Downloading '{bucket_name}/{object_key}' to '{local_path}'...")
            self.client.download_file(bucket_name, object_key, local_path)
            print(f"✅ File downloaded successfully!")
            print(f"   Path: {local_path}")
            print(f"   Size: {os.path.getsize(local_path)} bytes")
            return local_path
        except Exception as e:
            print(f"❌ Error downloading file: {str(e)}")
            return None

    def _iter_objects(self, bucket_name, prefix=''):
        """Yield every object in a bucket/prefix — no cap, no printing."""
        paginator = self.client.get_paginator('list_objects_v2')
        for page in paginator.paginate(Bucket=bucket_name, Prefix=prefix):
            yield from page.get('Contents', [])

    def list_objects(self, bucket_name, prefix='', max_keys=100):
        """Print up to max_keys objects in a bucket; return those shown."""
        try:
            paginator = self.client.get_paginator('list_objects_v2')
            print(f"📋 Objects in bucket '{bucket_name}':")
            if prefix:
                print(f"   Prefix: {prefix}")
            print("-" * 80)
            objects = []
            count = 0
            for page in paginator.paginate(Bucket=bucket_name, Prefix=prefix,
                                           PaginationConfig={'MaxItems': max_keys}):
                for obj in page.get('Contents', []):
                    count += 1
                    objects.append(obj)
                    size = self._format_size(obj['Size'])
                    print(f"📄 {obj['Key']}")
                    print(f"   Size: {size}")
                    print(f"   Last Modified: {obj['LastModified'].strftime('%Y-%m-%d %H:%M:%S')}")
                    print(f"   ETag: {obj['ETag']}")
                    public_url = f"https://{bucket_name}.{self.region}.linodeobjects.com/{obj['Key']}"
                    print(f"   URL: {public_url}")
                    print()
            print(f"📊 Total objects shown: {count}")
            return objects
        except Exception as e:
            print(f"❌ Error listing objects: {str(e)}")
            return []

    def delete_object(self, bucket_name, object_key):
        """Delete a single object. Returns True on success, False on error."""
        try:
            self.client.delete_object(Bucket=bucket_name, Key=object_key)
            print(f"🗑️ Object '{object_key}' deleted successfully")
            return True
        except Exception as e:
            print(f"❌ Error deleting object: {str(e)}")
            return False

    def generate_presigned_url(self, bucket_name, object_key, expiration=3600):
        """Generate a presigned GET URL valid for `expiration` seconds."""
        try:
            url = self.client.generate_presigned_url(
                'get_object',
                Params={'Bucket': bucket_name, 'Key': object_key},
                ExpiresIn=expiration,
            )
            expires_at = datetime.now() + timedelta(seconds=expiration)
            print(f"🔗 Presigned URL generated:")
            print(f"   Object: {object_key}")
            print(f"   URL: {url}")
            print(f"   Expires: {expires_at.strftime('%Y-%m-%d %H:%M:%S')}")
            print(f"   Duration: {expiration} seconds")
            return url
        except Exception as e:
            print(f"❌ Error generating presigned URL: {str(e)}")
            return None

    def sync_directory(self, local_dir, bucket_name, prefix='', delete=False, exclude_patterns=None):
        """Sync a local directory to Object Storage.

        Files are uploaded when missing remotely or when the local mtime is
        newer than the remote LastModified. With delete=True, remote keys
        with no local counterpart are removed.

        Returns:
            A dict of counters {'uploaded', 'updated', 'deleted', 'errors'},
            or None on fatal error.
        """
        try:
            print(f"🔄 Starting sync from '{local_dir}' to '{bucket_name}'...")
            uploaded = updated = removed = errors = 0
            # Enumerate the remote side with the uncapped iterator — the
            # display-oriented list_objects() stops at 100 keys, which
            # would silently mis-sync larger buckets.
            existing_objects = {obj['Key']: obj
                                for obj in self._iter_objects(bucket_name, prefix)}
            for root, dirs, files in os.walk(local_dir):
                for file in files:
                    if exclude_patterns and any(p in file for p in exclude_patterns):
                        continue
                    local_path = os.path.join(root, file)
                    relative_path = os.path.relpath(local_path, local_dir).replace('\\', '/')
                    object_key = f"{prefix}/{relative_path}" if prefix else relative_path
                    needs_upload = True
                    if object_key in existing_objects:
                        # LastModified is timezone-aware UTC; make the local
                        # mtime aware too — comparing a naive local time to
                        # (stripped) UTC gives wrong results off-UTC.
                        local_mtime = datetime.fromtimestamp(
                            os.path.getmtime(local_path), tz=timezone.utc)
                        remote_mtime = existing_objects[object_key]['LastModified']
                        if local_mtime <= remote_mtime:
                            needs_upload = False
                            print(f"⏭️ Skipping up-to-date: {object_key}")
                        else:
                            print(f"🔄 Updating: {object_key}")
                    else:
                        print(f"📤 Uploading: {object_key}")
                    if needs_upload:
                        metadata = {
                            'local_path': local_path,
                            'sync_time': datetime.now().isoformat(),
                        }
                        result = self.upload_file(local_path, bucket_name, object_key,
                                                  public=False, metadata=metadata)
                        if result:
                            if object_key in existing_objects:
                                updated += 1
                            else:
                                uploaded += 1
                        else:
                            errors += 1
            if delete:
                # Collect every local key, then drop remote keys without a
                # local counterpart.
                local_files = set()
                for root, dirs, files in os.walk(local_dir):
                    for file in files:
                        relative_path = os.path.relpath(
                            os.path.join(root, file), local_dir).replace('\\', '/')
                        local_files.add(f"{prefix}/{relative_path}" if prefix else relative_path)
                for object_key in existing_objects:
                    if object_key not in local_files:
                        if self.delete_object(bucket_name, object_key):
                            removed += 1
                        else:
                            errors += 1
            print(f"\n✅ Sync completed:")
            print(f"   Uploaded: {uploaded}")
            print(f"   Updated: {updated}")
            print(f"   Deleted: {removed}")
            print(f"   Errors: {errors}")
            return {
                'uploaded': uploaded,
                'updated': updated,
                'deleted': removed,
                'errors': errors,
            }
        except Exception as e:
            print(f"❌ Error syncing directory: {str(e)}")
            return None

    def set_bucket_website(self, bucket_name, index_file='index.html', error_file='error.html'):
        """Configure a bucket for static website hosting with public reads.

        Returns:
            The website URL, or None on error.
        """
        try:
            website_config = {
                'ErrorDocument': {'Key': error_file},
                'IndexDocument': {'Suffix': index_file},
            }
            self.client.put_bucket_website(
                Bucket=bucket_name,
                WebsiteConfiguration=website_config,
            )
            # Allow anonymous GETs on every object so the site is reachable.
            policy = {
                "Version": "2012-10-17",
                "Statement": [
                    {
                        "Sid": "PublicReadGetObject",
                        "Effect": "Allow",
                        "Principal": "*",
                        "Action": "s3:GetObject",
                        "Resource": f"arn:aws:s3:::{bucket_name}/*",
                    }
                ],
            }
            self.client.put_bucket_policy(
                Bucket=bucket_name,
                Policy=json.dumps(policy),
            )
            website_url = f"http://{bucket_name}.website-{self.region}.linodeobjects.com"
            print(f"🌐 Static website configured for bucket '{bucket_name}'")
            print(f"   Website URL: {website_url}")
            print(f"   Index file: {index_file}")
            print(f"   Error file: {error_file}")
            return website_url
        except Exception as e:
            print(f"❌ Error configuring website: {str(e)}")
            return None

    def _format_size(self, size_bytes):
        """Format a byte count as a human-readable string (e.g. '1.50KB')."""
        if size_bytes == 0:
            return "0B"
        size_names = ["B", "KB", "MB", "GB", "TB"]
        i = 0
        size = float(size_bytes)
        while size >= 1024.0 and i < len(size_names) - 1:
            size /= 1024.0
            i += 1
        return f"{size:.2f}{size_names[i]}"
def main():
    """Command-line entry point for the Object Storage manager."""
    parser = argparse.ArgumentParser(description='Linode Object Storage Manager')
    parser.add_argument('command', choices=[
        'create-bucket', 'list-buckets', 'delete-bucket',
        'upload', 'download', 'list-objects', 'delete-object',
        'presigned-url', 'sync', 'website'
    ])
    parser.add_argument('--bucket', required=False, help='Bucket name')
    parser.add_argument('--file', required=False, help='File path')
    parser.add_argument('--key', required=False, help='Object key')
    parser.add_argument('--local-path', required=False, help='Local file path')
    parser.add_argument('--directory', required=False, help='Local directory path')
    parser.add_argument('--prefix', required=False, default='', help='Object prefix')
    parser.add_argument('--public', action='store_true', help='Make object public')
    parser.add_argument('--private', action='store_true', help='Make bucket private')
    parser.add_argument('--force', action='store_true', help='Force delete bucket')
    parser.add_argument('--delete', action='store_true', help='Delete remote files when syncing')
    parser.add_argument('--exclude', required=False, nargs='*', help='Exclude patterns')
    parser.add_argument('--expiration', type=int, default=3600, help='Presigned URL expiration (seconds)')
    args = parser.parse_args()

    # Credentials are read from the environment, never from the CLI.
    access_key = os.environ.get('LINODE_ACCESS_KEY')
    secret_key = os.environ.get('LINODE_SECRET_KEY')
    region = os.environ.get('LINODE_OBJECT_REGION', 'us-east-1')
    if not access_key or not secret_key:
        print("❌ Please set LINODE_ACCESS_KEY and LINODE_SECRET_KEY environment variables")
        return

    storage = LinodeObjectStorage(access_key, secret_key, region)

    def require(message, *values):
        """Return True when every required CLI value is present; warn otherwise."""
        if all(values):
            return True
        print(message)
        return False

    try:
        command = args.command
        if command == 'create-bucket':
            if require("❌ Bucket name is required", args.bucket):
                storage.create_bucket(args.bucket, args.private)
        elif command == 'list-buckets':
            storage.list_buckets()
        elif command == 'delete-bucket':
            if require("❌ Bucket name is required", args.bucket):
                storage.delete_bucket(args.bucket, args.force)
        elif command == 'upload':
            if require("❌ Bucket name and file path are required", args.bucket, args.file):
                storage.upload_file(args.file, args.bucket, args.key, args.public)
        elif command == 'download':
            if require("❌ Bucket name and object key are required", args.bucket, args.key):
                storage.download_file(args.bucket, args.key, args.local_path)
        elif command == 'list-objects':
            if require("❌ Bucket name is required", args.bucket):
                storage.list_objects(args.bucket, args.prefix)
        elif command == 'delete-object':
            if require("❌ Bucket name and object key are required", args.bucket, args.key):
                storage.delete_object(args.bucket, args.key)
        elif command == 'presigned-url':
            if require("❌ Bucket name and object key are required", args.bucket, args.key):
                storage.generate_presigned_url(args.bucket, args.key, args.expiration)
        elif command == 'sync':
            if require("❌ Bucket name and directory path are required", args.bucket, args.directory):
                storage.sync_directory(args.directory, args.bucket, args.prefix, args.delete, args.exclude)
        elif command == 'website':
            if require("❌ Bucket name is required", args.bucket):
                storage.set_bucket_website(args.bucket)
    except KeyboardInterrupt:
        print("\n\n👋 Goodbye!")
    except Exception as e:
        print(f"❌ Error: {str(e)}")


if __name__ == "__main__":
    main()
# requirements.txt
"""
boto3>=1.29.0
python-dotenv>=1.0.0
"""
💻 Configuração Load Balancer yaml
🟡 intermediate
⭐⭐⭐
Configurar e gerenciar Load Balancers da Linode para alta disponibilidade
⏱️ 35 min
🏷️ linode, load balancer, terraform, infrastructure, ha
Prerequisites:
Linode account, Terraform knowledge, Load Balancer service, SSH key
# Linode Load Balancer Configuration
# Using Terraform for infrastructure as code
# terraform/main.tf
# Terraform settings: pin the official Linode provider to the 2.x series
# and require Terraform 1.0+.
terraform {
  required_providers {
    linode = {
      source  = "linode/linode"
      version = "~> 2.0"
    }
  }
  required_version = ">= 1.0"
}

# Authenticate the Linode provider with the API token variable below.
provider "linode" {
  token = var.linode_token
}
# Variables
# NOTE(review): resources below also reference var.ssh_public_key,
# var.ssl_certificate, var.ssl_private_key and random_password.root_password,
# none of which are declared in this file — confirm they are declared
# elsewhere (e.g. variables.tf) or add the missing declarations and the
# hashicorp/random provider.

variable "linode_token" {
  description = "Linode API token"
  type        = string
  sensitive   = true # keep the token out of plan/apply output
}

variable "region" {
  description = "Linode region"
  type        = string
  default     = "us-east"
}

variable "app_label" {
  description = "Label prefix for resources"
  type        = string
  default     = "myapp"
}
# Create backend Linodes
# Three identical web servers bootstrapped from the cloud-init template.
resource "linode_instance" "web_servers" {
  count           = 3
  label           = "${var.app_label}-web-${count.index}"
  image           = "linode/ubuntu22.04"
  region          = var.region
  type            = "g6-standard-2"
  authorized_keys = [var.ssh_public_key]
  root_pass       = random_password.root_password.result
  tags            = ["web", "production"]

  # User data script for web server setup.
  # NOTE(review): in linode provider 2.x, cloud-init data is passed under a
  # metadata { user_data = base64encode(...) } block; a top-level user_data
  # argument may not be accepted — verify against the provider docs.
  user_data = templatefile("${path.module}/cloud-init/web-server.sh", {
    hostname = "${var.app_label}-web-${count.index}"
  })

  private_ip = true

  # Wait for SSH before completing.
  # NOTE(review): a connection block only applies to provisioners; with no
  # provisioner declared here it has no effect and does not make Terraform
  # wait for SSH.
  connection {
    type    = "ssh"
    user    = "root"
    host    = self.ip_address
    timeout = "2m"
  }
}
# Create database Linode
# Single PostgreSQL host, reachable over the private network only.
resource "linode_instance" "database" {
  label           = "${var.app_label}-database"
  image           = "linode/ubuntu22.04"
  region          = var.region
  type            = "g6-standard-2"
  authorized_keys = [var.ssh_public_key]
  root_pass       = random_password.root_password.result
  tags            = ["database", "production"]

  # User data script for database setup.
  # NOTE(review): same caveat as the web servers — provider 2.x expects
  # cloud-init data under metadata { user_data } — confirm before applying.
  user_data = templatefile("${path.module}/cloud-init/database.sh", {
    hostname = "${var.app_label}-database"
  })

  private_ip = true
}
# Create Load Balancer
# NOTE(review): the Linode provider does not define a "linode_loadbalancer"
# resource — Linode's load balancer is a NodeBalancer (resource
# "linode_nodebalancer"), and the per-port settings below (protocol, port,
# algorithm, stickiness, health check, SSL cert/key) belong on
# "linode_nodebalancer_config" resources, not on the balancer itself.
# This block will likely fail validation as written — verify against the
# provider documentation and restructure accordingly.
resource "linode_loadbalancer" "main" {
  label  = "${var.app_label}-lb"
  region = var.region

  # Protocol configuration
  protocol = "https"

  # HTTPS configuration
  port = 443

  # Algorithm
  algorithm = "roundrobin"

  # Session stickiness
  session_stickiness = "none"

  # Health check: probe /health over plain HTTP on the backends.
  health_check {
    protocol            = "http"
    path                = "/health"
    port                = 80
    check_interval      = 10
    response_timeout    = 5
    healthy_threshold   = 2
    unhealthy_threshold = 3
  }

  # SSL certificate (can also use linode_certificate resource)
  ssl_cert = <<-EOT
    ${var.ssl_certificate}
  EOT
  ssl_key = <<-EOT
    ${var.ssl_private_key}
  EOT

  tags = ["loadbalancer", "production"]
}
# Attach backend Linodes to Load Balancer
# NOTE(review): linode_nodebalancer_node requires a config_id (the port
# configuration the node belongs to) in addition to nodebalancer_id, and
# its "address" must be the backend's PRIVATE "ip:port" string — a bare
# public ip_address will not pass validation. Verify against the provider
# docs; address is typically
# "${linode_instance.web_servers[count.index].private_ip_address}:80".
resource "linode_nodebalancer_node" "web_servers" {
  count = length(linode_instance.web_servers)

  nodebalancer_id = linode_loadbalancer.main.id
  label           = "${var.app_label}-web-${count.index}"
  address         = linode_instance.web_servers[count.index].ip_address
  port            = 80
  weight          = 100
  mode            = "accept"
}
# Backend service configuration for port 80 redirect
# NOTE(review): linode_nodebalancer_config uses check/check_path/
# check_interval/check_timeout/check_attempts attributes rather than a
# nested health_check block, and it supports neither "rules" nor "tags".
# The rules block also references linode_loadbalancer_config.https, while
# the HTTPS resource below is declared as linode_nodebalancer_config.https
# — an inconsistent address that cannot resolve. HTTP→HTTPS redirection is
# done on the backends (nginx), not on the NodeBalancer. Verify against
# the provider docs before applying.
resource "linode_nodebalancer_config" "http_redirect" {
  nodebalancer_id = linode_loadbalancer.main.id
  port            = 80
  protocol        = "http"

  # Redirect HTTP to HTTPS
  algorithm = "roundrobin"

  health_check {
    protocol            = "http"
    path                = "/health"
    port                = 80
    check_interval      = 10
    response_timeout    = 5
    healthy_threshold   = 2
    unhealthy_threshold = 3
  }

  # SSL offloading - redirect to HTTPS
  ssl_cert = ""
  ssl_key  = ""

  rules {
    hostname = "yourdomain.com"
    path     = "/*"
    service  = linode_loadbalancer_config.https.id
    priority = 1
  }

  tags = ["redirect", "production"]
}
# HTTPS backend configuration
# NOTE(review): same schema caveats as above — the provider's
# linode_nodebalancer_config expects check*/ssl_cert/ssl_key attributes
# (a "https" protocol config REQUIRES ssl_cert and ssl_key, which are
# missing here) and supports neither a nested health_check block nor tags.
# Verify against the provider documentation.
resource "linode_nodebalancer_config" "https" {
  nodebalancer_id = linode_loadbalancer.main.id
  port            = 443
  protocol        = "https"
  algorithm       = "roundrobin"

  health_check {
    protocol            = "http"
    path                = "/health"
    port                = 80
    check_interval      = 10
    response_timeout    = 5
    healthy_threshold   = 2
    unhealthy_threshold = 3
  }

  tags = ["https", "production"]
}
# Cloud-init scripts directory
# terraform/cloud-init/web-server.sh
#!/bin/bash
# Cloud-init bootstrap for the web servers.
# This file is rendered by Terraform's templatefile(): ${hostname} is
# substituted at plan time, so any literal shell/JS "${...}" sequence must
# be escaped as "$${...}" to survive rendering.

# Set hostname
hostname="${hostname}"
echo "127.0.0.1 ${hostname}" >> /etc/hosts
echo "${hostname}" > /etc/hostname

# Update system
apt-get update
apt-get upgrade -y

# Install Nginx and Node.js
apt-get install -y nginx curl

# Install Node.js 18
curl -fsSL https://deb.nodesource.com/setup_18.x | bash -
apt-get install -y nodejs

# Create app directory
mkdir -p /var/www/app
cd /var/www/app

# Create simple Express.js app.
# The app listens on 3000: binding it to 80 (as before) collides with
# nginx below, needs root privileges the www-data service user lacks, and
# made nginx's proxy_pass to localhost:80 loop back onto nginx itself.
cat > app.js << 'EOF'
const express = require('express');
const app = express();
const port = 3000;

// Health endpoint probed by the NodeBalancer checks (path /health).
app.get('/health', (req, res) => {
  res.json({
    status: 'healthy',
    hostname: require('os').hostname(),
    timestamp: new Date().toISOString()
  });
});

app.get('/', (req, res) => {
  res.send('Hello from web server!');
});

app.listen(port, '0.0.0.0', () => {
  console.log(`Server running on port $${port}`);
});
EOF

# Create package.json
cat > package.json << 'EOF'
{
  "name": "web-app",
  "version": "1.0.0",
  "main": "app.js",
  "dependencies": {
    "express": "^4.18.0"
  },
  "scripts": {
    "start": "node app.js"
  }
}
EOF

# Install dependencies
npm install
# Let the service user read the application tree.
chown -R www-data:www-data /var/www/app

# Configure Nginx as reverse proxy: public port 80 -> app on 3000.
cat > /etc/nginx/sites-available/default << 'EOF'
server {
  listen 80 default_server;
  server_name _;

  location / {
    proxy_pass http://localhost:3000;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection 'upgrade';
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_cache_bypass $http_upgrade;
  }
}
EOF

# Enable and start services
systemctl enable nginx
systemctl start nginx

# Create systemd service for app
cat > /etc/systemd/system/webapp.service << 'EOF'
[Unit]
Description=Web Application
After=network.target

[Service]
Type=simple
User=www-data
WorkingDirectory=/var/www/app
ExecStart=/usr/bin/node app.js
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable webapp
systemctl start webapp

# Configure firewall (if UFW is available)
if command -v ufw >/dev/null 2>&1; then
  ufw --force enable
  ufw allow ssh
  ufw allow 80
  ufw allow 443
fi

echo "Web server setup complete!"
# terraform/cloud-init/database.sh
#!/bin/bash
# Cloud-init template for the PostgreSQL server (rendered by Terraform
# templatefile; ${hostname} is a template variable).
set -e

# Set hostname
hostname="${hostname}"
echo "127.0.0.1 ${hostname}" >> /etc/hosts
echo "${hostname}" > /etc/hostname

# Update system
apt-get update
apt-get upgrade -y

# Install PostgreSQL (distro default version)
apt-get install -y postgresql postgresql-contrib

# Resolve the config directory instead of hard-coding the major version
# (the original assumed /etc/postgresql/14/, which breaks on any other
# distro release).
PG_CONF_DIR=$(ls -d /etc/postgresql/*/main | head -n 1)

# Listen on all interfaces; access is restricted by pg_hba and the firewall
cat >> "$PG_CONF_DIR/postgresql.conf" << 'EOF'
listen_addresses = '*'
EOF

# Allow connections from the Linode private network.
# FIX(review): Linode private addresses are in 192.168.128.0/17, not
# 10.0.0.0/8.
cat >> "$PG_CONF_DIR/pg_hba.conf" << 'EOF'
# Allow private network connections
host all all 192.168.128.0/17 md5
EOF

# Create database and user.
# SECURITY(review): the password is hard-coded in the image — inject it
# via a template variable or secret store before production use.
sudo -u postgres psql << 'EOF'
CREATE DATABASE myapp;
CREATE USER myappuser WITH PASSWORD 'secretpassword';
GRANT ALL PRIVILEGES ON DATABASE myapp TO myappuser;
ALTER USER myappuser CREATEDB;
EOF

# Enable and restart PostgreSQL so the new settings take effect
systemctl enable postgresql
systemctl restart postgresql

# Configure firewall
if command -v ufw >/dev/null 2>&1; then
  ufw --force enable
  ufw allow ssh
  ufw allow 5432
fi

echo "Database server setup complete!"
# terraform/variables.tf
# Inputs shared by the instance and load-balancer resources.

variable "ssh_public_key" {
  description = "SSH public key for Linode instances"
  type        = string
}

variable "ssl_certificate" {
  description = "SSL certificate for HTTPS"
  type        = string
}

variable "ssl_private_key" {
  description = "SSL private key for HTTPS"
  type        = string
  sensitive   = true # redacted from plan/apply output
}
# terraform/outputs.tf
# NOTE(review): the Linode provider's resource type is linode_nodebalancer;
# the "linode_loadbalancer.main" references follow this file's existing
# naming — confirm against the actual resource declaration.
output "loadbalancer_ip" {
  description = "Load Balancer IPv4 address"
  value       = linode_loadbalancer.main.ipv4
}

output "loadbalancer_hostname" {
  description = "Load Balancer hostname"
  value       = linode_loadbalancer.main.hostname
}

output "web_server_ips" {
  description = "Web server IP addresses"
  # Modern full-splat syntax; the legacy ".*." form still works but is
  # discouraged since Terraform 0.12.
  value       = linode_instance.web_servers[*].ip_address
}

output "database_ip" {
  description = "Database server IP address"
  value       = linode_instance.database.ip_address
}
# Manual CLI commands for Load Balancer management
"""
# List load balancers
linode-cli load-balancers list
# Create load balancer
linode-cli load-balancers create \
--label "my-lb" \
--region us-east \
--protocol https \
--port 443 \
--algorithm roundrobin \
--check-interval 10 \
--check-timeout 5 \
--check-path /health \
--check-attempts 3
# Create backend configuration
linode-cli load-balancers config-create 12345 \
--protocol https \
--port 443 \
--algorithm roundrobin
# Add node to load balancer
linode-cli load-balancers node-create 12345 \
--label "node-1" \
--address 192.168.1.100 \
--port 80 \
--weight 100
# Update SSL certificate
linode-cli load-balancers update 12345 \
--ssl-cert /path/to/cert.pem \
--ssl-key /path/to/key.pem
# Get load balancer details
linode-cli load-balancers view 12345
# Delete load balancer
linode-cli load-balancers delete 12345
"""
💻 Deploy LKE Kubernetes yaml
🔴 complex
⭐⭐⭐⭐
Implantar e gerenciar aplicações conteinerizadas no Linode Kubernetes Engine (LKE)
⏱️ 45 min
🏷️ linode, kubernetes, lke, containers, k8s
Prerequisites:
Linode account, LKE enabled, kubectl installed, Docker knowledge
# Linode Kubernetes Engine (LKE) Deployment
# Complete Kubernetes application deployment
# 1. Create LKE cluster using Linode CLI or Terraform
# terraform/lke-cluster.tf
resource "linode_lke_cluster" "main" {
  label       = "production-cluster"
  k8s_version = "1.28"
  region      = "us-east"
  tags        = ["production", "k8s"]
  # NOTE(review): the provider may require at least one inline `pool`
  # block on linode_lke_cluster — confirm against the provider version
  # before relying solely on separate linode_lke_node_pool resources.
}
# Node pool configuration
# FIX(review): `count` is a reserved Terraform meta-argument (it would
# create N copies of this resource, each with a default-sized pool); the
# provider's argument for the number of nodes is `node_count`.
resource "linode_lke_node_pool" "main_nodes" {
  cluster_id = linode_lke_cluster.main.id
  type       = "g6-standard-2"
  node_count = 3

  autoscaler {
    min = 1
    max = 5
  }
}
# Additional node pool for specific workloads
# FIX(review): `node_count` (not the reserved meta-argument `count`)
# sizes the pool.
resource "linode_lke_node_pool" "workers" {
  cluster_id = linode_lke_cluster.main.id
  type       = "g6-standard-4"
  node_count = 2
  tags       = ["workers", "high-memory"]
}
# 2. Kubernetes manifests for application deployment
# k8s/namespace.yaml
# All workloads below live in this namespace.
apiVersion: v1
kind: Namespace
metadata:
  name: production
  labels:
    name: production
    environment: production
---
# k8s/configmap.yaml
# Non-secret runtime configuration, injected into the app via envFrom.
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-config
  namespace: production
data:
  NODE_ENV: "production"
  LOG_LEVEL: "info"
  REDIS_HOST: "redis-service"
  REDIS_PORT: "6379"
  DB_HOST: "postgresql-service"
  DB_PORT: "5432"
  DB_NAME: "myapp"
  API_BASE_URL: "https://api.myapp.com"
---
# k8s/secret.yaml
# SECURITY(review): base64 is encoding, not encryption — do not commit
# real secret values; prefer sealed-secrets or an external secret store.
apiVersion: v1
kind: Secret
metadata:
  name: app-secrets
  namespace: production
type: Opaque
data:
  # Base64 encoded values
  db-password: c2VjdXJlZGF0YWJhc2VwYXNzd29yZA== # securedatabasepassword
  jwt-secret: c3VwZXJzZWNyZXRqd3R0b2tlbg== # supersecretjwttoken
  api-key: c3VwZXJzZWNyZXRhcGlrZXk= # supersecretapikey
  redis-password: cmVkaXNwYXNzd29yZA== # redispassword
---
# k8s/storage.yaml
# NOTE(review): postgres-pvc appears unused — the PostgreSQL StatefulSet
# provisions its own storage through volumeClaimTemplates; confirm and
# consider removing it.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: postgres-pvc
  namespace: production
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi
  storageClassName: linode-block-storage
---
# Backing volume for the Redis Deployment.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: redis-pvc
  namespace: production
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
  storageClassName: linode-block-storage
---
# k8s/postgresql.yaml
# Single-replica PostgreSQL; storage is provisioned per pod through
# volumeClaimTemplates on Linode Block Storage.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: postgresql
  namespace: production
spec:
  serviceName: postgresql-service
  replicas: 1
  selector:
    matchLabels:
      app: postgresql
  template:
    metadata:
      labels:
        app: postgresql
    spec:
      containers:
        - name: postgresql
          image: postgres:15
          env:
            - name: POSTGRES_DB
              valueFrom:
                configMapKeyRef:
                  name: app-config
                  key: DB_NAME
            - name: POSTGRES_USER
              value: myappuser
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: app-secrets
                  key: db-password
            # Keep the data directory in a subdirectory of the mount.
            - name: PGDATA
              value: /var/lib/postgresql/data/pgdata
          ports:
            - containerPort: 5432
          volumeMounts:
            - name: postgres-storage
              mountPath: /var/lib/postgresql/data
          resources:
            requests:
              memory: "512Mi"
              cpu: "250m"
            limits:
              memory: "1Gi"
              cpu: "500m"
          livenessProbe:
            exec:
              command: ["pg_isready", "-U", "myappuser", "-d", "myapp"]
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            exec:
              command: ["pg_isready", "-U", "myappuser", "-d", "myapp"]
            initialDelaySeconds: 5
            periodSeconds: 5
  volumeClaimTemplates:
    - metadata:
        name: postgres-storage
      spec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 20Gi
        storageClassName: linode-block-storage
---
# k8s/postgresql-service.yaml
# Stable ClusterIP name referenced by the app (DB_HOST) and the backup
# CronJob.
apiVersion: v1
kind: Service
metadata:
  name: postgresql-service
  namespace: production
spec:
  selector:
    app: postgresql
  ports:
    - port: 5432
      targetPort: 5432
  type: ClusterIP
---
# k8s/redis.yaml
# Single Redis instance; $(REDIS_PASSWORD) in the command is expanded by
# Kubernetes from the container's env.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
  namespace: production
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
        - name: redis
          image: redis:7-alpine
          command:
            - redis-server
            - --requirepass
            - "$(REDIS_PASSWORD)"
          env:
            - name: REDIS_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: app-secrets
                  key: redis-password
          ports:
            - containerPort: 6379
          volumeMounts:
            - name: redis-storage
              mountPath: /data
          resources:
            requests:
              memory: "256Mi"
              cpu: "100m"
            limits:
              memory: "512Mi"
              cpu: "250m"
          # NOTE(review): with requirepass set, an unauthenticated PING
          # may be rejected — these probes might need authentication;
          # verify against the Redis version in use.
          livenessProbe:
            exec:
              command: ["redis-cli", "ping"]
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            exec:
              command: ["redis-cli", "ping"]
            initialDelaySeconds: 5
            periodSeconds: 5
      volumes:
        - name: redis-storage
          persistentVolumeClaim:
            claimName: redis-pvc
---
# k8s/redis-service.yaml
# ClusterIP referenced by the app via REDIS_HOST.
apiVersion: v1
kind: Service
metadata:
  name: redis-service
  namespace: production
spec:
  selector:
    app: redis
  ports:
    - port: 6379
      targetPort: 6379
  type: ClusterIP
---
# k8s/application.yaml
# Stateless web tier; configuration and secrets are injected wholesale
# via envFrom. Replica count is managed by the HPA in k8s/hpa.yaml.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-app
  namespace: production
  labels:
    app: web-app
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web-app
  template:
    metadata:
      labels:
        app: web-app
    spec:
      containers:
        - name: web-app
          image: your-registry/web-app:latest
          imagePullPolicy: Always
          ports:
            - containerPort: 3000
          envFrom:
            - configMapRef:
                name: app-config
            - secretRef:
                name: app-secrets
          resources:
            requests:
              memory: "256Mi"
              cpu: "100m"
            limits:
              memory: "512Mi"
              cpu: "500m"
          livenessProbe:
            httpGet:
              path: /health
              port: 3000
            initialDelaySeconds: 30
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /ready
              port: 3000
            initialDelaySeconds: 5
            periodSeconds: 5
            timeoutSeconds: 3
            failureThreshold: 3
          lifecycle:
            # Grace period so in-flight requests drain before shutdown.
            preStop:
              exec:
                command: ["/bin/sh", "-c", "sleep 15"]
---
# k8s/application-service.yaml
# Fronts the web-app pods; the Ingress routes to this service on port 80.
apiVersion: v1
kind: Service
metadata:
  name: web-app-service
  namespace: production
spec:
  selector:
    app: web-app
  ports:
    - port: 80
      targetPort: 3000
  type: ClusterIP
---
# k8s/ingress.yaml
# FIX(review): the kubernetes.io/ingress.class annotation is deprecated;
# with networking.k8s.io/v1 the controller is selected through
# spec.ingressClassName.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: app-ingress
  namespace: production
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    nginx.ingress.kubernetes.io/rate-limit: "100"
    nginx.ingress.kubernetes.io/rate-limit-window: "1m"
    nginx.ingress.kubernetes.io/proxy-body-size: "50m"
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - yourdomain.com
        - api.yourdomain.com
      secretName: app-tls
  rules:
    - host: yourdomain.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: web-app-service
                port:
                  number: 80
    - host: api.yourdomain.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: web-app-service
                port:
                  number: 80
---
# k8s/hpa.yaml
# Scales the web-app Deployment between 3 and 10 replicas on CPU and
# memory utilization, with slow scale-down and fast scale-up.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: web-app-hpa
  namespace: production
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: web-app
  minReplicas: 3
  maxReplicas: 10
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 70
    - type: Resource
      resource:
        name: memory
        target:
          type: Utilization
          averageUtilization: 80
  behavior:
    # Shrink by at most 10%/min, only after 5 minutes of stability.
    scaleDown:
      stabilizationWindowSeconds: 300
      policies:
        - type: Percent
          value: 10
          periodSeconds: 60
    # Grow by up to 50% or 2 pods per minute, whichever is larger.
    scaleUp:
      stabilizationWindowSeconds: 60
      policies:
        - type: Percent
          value: 50
          periodSeconds: 60
        - type: Pods
          value: 2
          periodSeconds: 60
      selectPolicy: Max
---
# k8s/vpa.yaml
# FIX(review): web-app is already horizontally autoscaled on CPU and
# memory (k8s/hpa.yaml); running VPA in "Auto" mode on the same resource
# metrics makes the two autoscalers fight each other. VPA is therefore
# kept in "Off" (recommendation-only) mode. Note it also requires the
# VPA operator, which LKE does not install by default.
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: web-app-vpa
  namespace: production
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: web-app
  updatePolicy:
    updateMode: "Off"
  resourcePolicy:
    containerPolicies:
      - containerName: web-app
        maxAllowed:
          cpu: 1
          memory: 1Gi
        minAllowed:
          cpu: 100m
          memory: 128Mi
---
# k8s/network-policy.yaml
# Locks down web-app pods: inbound only on 3000 from the listed sources;
# outbound only to PostgreSQL, Redis, DNS, and HTTPS.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: app-network-policy
  namespace: production
spec:
  podSelector:
    matchLabels:
      app: web-app
  policyTypes:
    - Ingress
    - Egress
  ingress:
    - from:
        - namespaceSelector:
            matchLabels:
              name: ingress-nginx
        # NOTE(review): allowing ingress *from* the database/cache pods
        # is unusual — they do not initiate connections to the app;
        # these two entries can likely be dropped.
        - podSelector:
            matchLabels:
              app: postgresql
        - podSelector:
            matchLabels:
              app: redis
      ports:
        - protocol: TCP
          port: 3000
  egress:
    - to:
        - podSelector:
            matchLabels:
              app: postgresql
      ports:
        - protocol: TCP
          port: 5432
    - to:
        - podSelector:
            matchLabels:
              app: redis
      ports:
        - protocol: TCP
          port: 6379
    # DNS lookups and outbound HTTPS to any destination.
    - to: []
      ports:
        - protocol: TCP
          port: 53
        - protocol: UDP
          port: 53
        - protocol: TCP
          port: 443
---
# k8s/poddisruptionbudget.yaml
# Keep at least 2 of the web-app replicas running during voluntary
# disruptions (node drains, cluster upgrades).
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: web-app-pdb
  namespace: production
spec:
  minAvailable: 2
  selector:
    matchLabels:
      app: web-app
---
# k8s/cronjob-backup.yaml
# Nightly pg_dump of the myapp database to a persistent volume.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: database-backup
  namespace: production
spec:
  schedule: "0 2 * * *" # Daily at 2 AM
  # FIX(review): the history limits are CronJob-spec fields; the original
  # listing placed them inside the pod spec, where they are invalid.
  successfulJobsHistoryLimit: 7
  failedJobsHistoryLimit: 3
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: pg-backup
              image: postgres:15
              env:
                - name: PGPASSWORD
                  valueFrom:
                    secretKeyRef:
                      name: app-secrets
                      key: db-password
              command:
                - /bin/bash
                - -c
                - |
                  pg_dump -h postgresql-service -U myappuser -d myapp | gzip > /backup/backup-$(date +%Y%m%d-%H%M%S).sql.gz
              volumeMounts:
                - name: backup-storage
                  mountPath: /backup
          volumes:
            - name: backup-storage
              persistentVolumeClaim:
                # NOTE(review): backup-pvc is not defined anywhere in
                # these manifests — create it or the job stays Pending.
                claimName: backup-pvc
          restartPolicy: OnFailure
---
# k8s/monitoring.yaml
# FIX(review): ServiceMonitor is a Prometheus Operator CRD; its
# apiVersion is monitoring.coreos.com/v1, not core "v1".
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: web-app-metrics
  namespace: production
  labels:
    app: web-app
spec:
  selector:
    matchLabels:
      app: web-app
  endpoints:
    # NOTE(review): this matches a named "metrics" port on the selected
    # Service — web-app-service does not currently define one; add it
    # (and matching Service labels) for scraping to work.
    - port: metrics
      interval: 30s
      path: /metrics
---
# k8s/security.yaml
# FIX(review): PodSecurityPolicy (policy/v1beta1) was removed in
# Kubernetes v1.25, so it cannot be applied to this v1.28 LKE cluster
# (PSP was also cluster-scoped, making the namespace field invalid).
# The supported replacement is Pod Security Admission: enforcing the
# "restricted" profile on the namespace covers the original intent
# (no privileged pods, no privilege escalation, non-root, dropped
# capabilities, restricted volume types).
apiVersion: v1
kind: Namespace
metadata:
  name: production
  labels:
    name: production
    environment: production
    pod-security.kubernetes.io/enforce: restricted
    pod-security.kubernetes.io/enforce-version: latest
# 3. Deployment scripts
# scripts/deploy.sh
#!/bin/bash
set -e

# Configure kubectl to connect to the LKE cluster
LINODE_API_TOKEN="${LINODE_API_TOKEN}"
CLUSTER_ID="12345"

# Get kubeconfig.
# FIX(review): the LKE API returns the kubeconfig base64-encoded — decode
# it before use, and make sure ~/.kube exists.
mkdir -p ~/.kube
linode-cli lke kubeconfig-view "$CLUSTER_ID" --text --no-headers | base64 -d > ~/.kube/config-lke
export KUBECONFIG=~/.kube/config-lke

# Create namespace (idempotent)
kubectl create namespace production --dry-run=client -o yaml | kubectl apply -f -

# Apply all manifests
echo "Applying Kubernetes manifests..."
kubectl apply -f k8s/

# Wait for workloads.
# FIX(review): PostgreSQL is a StatefulSet, not a Deployment — the
# original rollout command would fail with "deployment not found".
echo "Waiting for deployments to be ready..."
kubectl rollout status deployment/web-app -n production --timeout=300s
kubectl rollout status statefulset/postgresql -n production --timeout=300s
kubectl rollout status deployment/redis -n production --timeout=300s

# Check pod status
echo "Checking pod status..."
kubectl get pods -n production

echo "Deployment completed successfully!"
# 4. Cleanup script
# scripts/cleanup.sh
#!/bin/bash
# Tear down everything deploy.sh created; --ignore-not-found makes the
# script idempotent.
set -e

echo "Cleaning up Kubernetes resources..."
kubectl delete -f k8s/ --ignore-not-found=true

echo "Cleaning up namespace..."
kubectl delete namespace production --ignore-not-found=true

echo "Cleanup completed!"
# 5. CLI commands for LKE management
"""
# Create LKE cluster
linode-cli lke cluster-create \
--label production-cluster \
--region us-east \
--k8s-version 1.28 \
--node-pools.count 3 \
--node-pools.type g6-standard-2 \
--node-pools.autoscaler.min 1 \
--node-pools.autoscaler.max 5
# List clusters
linode-cli lke clusters list
# Get kubeconfig
linode-cli lke kubeconfig-view <cluster-id> > ~/.kube/config-lke
# Add additional node pool
linode-cli lke nodepool-add <cluster-id> \
--type g6-standard-4 \
--count 2 \
--tags workers,high-memory
# Scale node pool
linode-cli lke nodepool-update <cluster-id> <nodepool-id> \
--count 5
# Recycle nodes
linode-cli lke node-recycle <cluster-id> <node-id>
# Delete cluster
linode-cli lke cluster-delete <cluster-id>
"""