
Deployment Guide

This guide covers the deployment process for FlavumHive in production environments.

Deployment Options

FlavumHive can be deployed using:

  1. Docker containers
  2. Systemd service
  3. Manual process management
  4. Cloud platforms

Docker Deployment

1. Using Docker Compose

# docker-compose.yml
version: '3.8'
services:
  twitter_bot:
    build: .
    environment:
      - TWITTER_USERNAME=${TWITTER_USERNAME}
      - TWITTER_PASSWORD=${TWITTER_PASSWORD}
      - TWITTER_EMAIL=${TWITTER_EMAIL}
      - OPENAI_API_KEY=${OPENAI_API_KEY}
    volumes:
      - ./data:/app/data
      - ./logs:/app/logs
    command: python continuous_twitter_bot.py
    restart: unless-stopped

  reddit_bot:
    build: .
    environment:
      - REDDIT_USERNAME=${REDDIT_USERNAME}
      - REDDIT_PASSWORD=${REDDIT_PASSWORD}
      - REDDIT_CLIENT_ID=${REDDIT_CLIENT_ID}
      - REDDIT_CLIENT_SECRET=${REDDIT_CLIENT_SECRET}
    volumes:
      - ./data:/app/data
      - ./logs:/app/logs
    command: python main.py --platform reddit
    restart: unless-stopped
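
Docker Compose reads the ${...} values from a .env file in the same directory. A minimal sketch of that file, using the variable names referenced above (replace the placeholder values with real credentials and keep the file out of version control):

# .env
TWITTER_USERNAME=your_twitter_username
TWITTER_PASSWORD=your_twitter_password
TWITTER_EMAIL=your_twitter_email
OPENAI_API_KEY=your_openai_api_key
REDDIT_USERNAME=your_reddit_username
REDDIT_PASSWORD=your_reddit_password
REDDIT_CLIENT_ID=your_reddit_client_id
REDDIT_CLIENT_SECRET=your_reddit_client_secret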

2. Building the Image

# Dockerfile
FROM python:3.9-slim

# Install Chrome and dependencies
RUN apt-get update && apt-get install -y \
chromium \
chromium-driver \
&& rm -rf /var/lib/apt/lists/*

# Set up working directory
WORKDIR /app

# Copy requirements and install dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY . .

# Create necessary directories
RUN mkdir -p data logs

# Set environment variables
ENV PYTHONUNBUFFERED=1
ENV CHROME_BIN=/usr/bin/chromium

# Default command
CMD ["python", "continuous_twitter_bot.py"]

3. Running with Docker

# Build and start containers
docker-compose up -d

# View logs
docker-compose logs -f

# Stop containers
docker-compose down
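
To pick up code or dependency changes, rebuild the images before restarting the containers:

# Rebuild images and restart with the new build
docker-compose build
docker-compose up -d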

Systemd Service Deployment

1. Twitter Bot Service

# /etc/systemd/system/twitter_bot.service
[Unit]
Description=FlavumHive Twitter Bot
After=network.target

[Service]
Type=simple
User=flavumhive
WorkingDirectory=/opt/flavumhive
Environment=PYTHONUNBUFFERED=1
EnvironmentFile=/opt/flavumhive/.env
ExecStart=/opt/flavumhive/venv/bin/python continuous_twitter_bot.py
Restart=always
RestartSec=30

[Install]
WantedBy=multi-user.target

2. Reddit Bot Service

# /etc/systemd/system/reddit_bot.service
[Unit]
Description=FlavumHive Reddit Bot
After=network.target

[Service]
Type=simple
User=flavumhive
WorkingDirectory=/opt/flavumhive
Environment=PYTHONUNBUFFERED=1
EnvironmentFile=/opt/flavumhive/.env
ExecStart=/opt/flavumhive/venv/bin/python main.py --platform reddit
Restart=always
RestartSec=30

[Install]
WantedBy=multi-user.target
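
Both units assume a dedicated flavumhive user and an installation under /opt/flavumhive with a virtual environment and an .env file. A minimal setup sketch (the repository URL is the placeholder used elsewhere in this guide):

# Create a system user and install the application
sudo useradd -r -d /opt/flavumhive flavumhive
sudo git clone https://github.com/yourusername/flavumhive.git /opt/flavumhive
cd /opt/flavumhive
sudo python3 -m venv venv
sudo venv/bin/pip install -r requirements.txt
sudo cp .env.example .env   # then fill in credentials
sudo chown -R flavumhive:flavumhive /opt/flavumhive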

3. Managing Services

# Reload systemd so it picks up the new unit files
sudo systemctl daemon-reload

# Enable services
sudo systemctl enable twitter_bot
sudo systemctl enable reddit_bot

# Start services
sudo systemctl start twitter_bot
sudo systemctl start reddit_bot

# Check status
sudo systemctl status twitter_bot
sudo systemctl status reddit_bot

# View logs
sudo journalctl -u twitter_bot -f
sudo journalctl -u reddit_bot -f

Manual Process Management

1. Using Screen Sessions

# Start Twitter bot
screen -S twitter_bot
source venv/bin/activate
python continuous_twitter_bot.py
# Ctrl+A, D to detach

# Start Reddit bot
screen -S reddit_bot
source venv/bin/activate
python main.py --platform reddit
# Ctrl+A, D to detach

# List sessions
screen -ls

# Reattach to session
screen -r twitter_bot

2. Using PM2

# Install PM2
npm install -g pm2

# Start bots
pm2 start continuous_twitter_bot.py --name "twitter_bot"
pm2 start main.py --name "reddit_bot" -- --platform reddit

# Monitor processes
pm2 monit

# View logs
pm2 logs
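
To survive reboots, save the process list and install PM2's startup hook; if the bots run from a virtual environment, point PM2 at its interpreter (the path shown is an assumption):

# Run under the project's virtualenv interpreter
pm2 start continuous_twitter_bot.py --name "twitter_bot" --interpreter ./venv/bin/python

# Persist the process list and restore it on boot
pm2 save
pm2 startup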

Cloud Platform Deployment

1. AWS EC2 Setup

# Install dependencies
sudo yum update -y
sudo yum install -y python39 git chromium chromedriver

# Clone repository
git clone https://github.com/yourusername/flavumhive.git
cd flavumhive

# Set up Python environment
python3.9 -m venv venv
source venv/bin/activate
pip install -r requirements.txt

# Configure environment
cp .env.example .env
nano .env
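
With dependencies installed and .env filled in, the bots can be started with any of the approaches above; for example, create the systemd units from the previous section (adjusting the paths if the repository lives somewhere other than /opt/flavumhive) and enable them:

# Enable and start the units created earlier
sudo systemctl daemon-reload
sudo systemctl enable --now twitter_bot reddit_bot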

2. Google Cloud Run

# cloudbuild.yaml
steps:
  - name: 'gcr.io/cloud-builders/docker'
    args: ['build', '-t', 'gcr.io/$PROJECT_ID/flavumhive', '.']
  - name: 'gcr.io/cloud-builders/docker'
    args: ['push', 'gcr.io/$PROJECT_ID/flavumhive']
images:
  - 'gcr.io/$PROJECT_ID/flavumhive'
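
A sketch of building and deploying with the gcloud CLI, assuming the service is named flavumhive and runs in us-central1 (both placeholders):

# Build and push the image with Cloud Build
gcloud builds submit --config cloudbuild.yaml

# Deploy the pushed image (PROJECT_ID must be set in the shell here;
# Cloud Build substitutes it automatically inside cloudbuild.yaml)
gcloud run deploy flavumhive \
  --image gcr.io/$PROJECT_ID/flavumhive \
  --region us-central1 \
  --no-allow-unauthenticated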

Monitoring Setup

1. Prometheus Configuration

# prometheus.yml
scrape_configs:
  - job_name: 'flavumhive'
    static_configs:
      - targets: ['localhost:8000']
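
This scrape config assumes the bots expose Prometheus metrics (including the tweets_total counter used below) on port 8000. A quick check that the endpoint is up:

# Verify the metrics endpoint is reachable and exporting the counter
curl -s http://localhost:8000/metrics | grep tweets_total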

2. Grafana Dashboard

{
  "dashboard": {
    "panels": [
      {
        "title": "Tweet Rate",
        "type": "graph",
        "datasource": "Prometheus",
        "targets": [
          {
            "expr": "rate(tweets_total[5m])"
          }
        ]
      }
    ]
  }
}
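
The panel can be imported through the Grafana UI (Dashboards → Import) or pushed over the HTTP API; a sketch assuming the JSON above is saved as dashboard.json and Grafana runs locally with default credentials:

# Import the dashboard via the Grafana API
curl -X POST http://admin:admin@localhost:3000/api/dashboards/db \
  -H 'Content-Type: application/json' \
  -d @dashboard.json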

Backup Strategy

1. Database Backup

#!/bin/bash
# backup.sh
DATE=$(date +%Y%m%d)
BACKUP_DIR="/opt/flavumhive/backups"
mkdir -p "$BACKUP_DIR"

# Backup databases
sqlite3 bot.db ".backup '$BACKUP_DIR/bot_$DATE.db'"
sqlite3 reddit_bot.db ".backup '$BACKUP_DIR/reddit_bot_$DATE.db'"

# Compress backups, then remove the uncompressed copies
tar -czf "$BACKUP_DIR/backup_$DATE.tar.gz" "$BACKUP_DIR"/*.db
rm -f "$BACKUP_DIR"/*.db

# Clean old backups (keep last 7 days)
find "$BACKUP_DIR" -name "*.tar.gz" -mtime +7 -delete

2. Configuration Backup

# Backup configuration files
cp .env "$BACKUP_DIR/env_$DATE"
cp config.json "$BACKUP_DIR/config_$DATE.json"

Security Considerations

1. File Permissions

# Set secure permissions
chmod 600 .env
chmod 644 config.json
chmod 755 continuous_twitter_bot.py main.py

2. Network Security

# Configure firewall
sudo ufw allow ssh
sudo ufw allow http
sudo ufw enable
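
If Prometheus scrapes the bots from a different host, the metrics port from the monitoring section also needs to be opened:

# Only required when metrics are scraped remotely
sudo ufw allow 8000/tcp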

3. Credential Management

# Use environment variables
export TWITTER_USERNAME="your_username"
export TWITTER_PASSWORD="your_password"

# Or use a credential management service such as AWS Secrets Manager
aws secretsmanager get-secret-value --secret-id flavumhive/twitter
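
The Secrets Manager call returns a JSON document; the secret string itself can be extracted with --query (the secret name and keys are placeholders):

# Print only the stored secret string
aws secretsmanager get-secret-value \
  --secret-id flavumhive/twitter \
  --query SecretString \
  --output text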

Maintenance Procedures

1. Log Rotation

# /etc/logrotate.d/flavumhive
/opt/flavumhive/logs/*.log {
    daily
    rotate 7
    compress
    delaycompress
    missingok
    notifempty
    create 0640 flavumhive flavumhive
}
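
A dry run confirms that the rotation rules parse and match the log files:

# Show what logrotate would do without rotating anything
sudo logrotate -d /etc/logrotate.d/flavumhive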

2. Health Checks

#!/bin/bash
# health_check.sh
check_process() {
    if ! pgrep -f "$1" > /dev/null; then
        echo "Process $1 is down. Restarting..."
        systemctl restart "$1"
    fi
}

check_process "twitter_bot"
check_process "reddit_bot"

Troubleshooting

Common Issues

  1. Chrome/Selenium Issues

# Clear the Chrome user data used by the Twitter bot
rm -rf debug_twitter/*
sudo systemctl restart twitter_bot

  2. Database Locks

# Identify processes holding the database files
fuser bot.db
fuser reddit_bot.db

  3. Memory Issues

# Monitor memory usage
watch -n 1 'ps aux | grep python'

# Limit memory in the [Service] section of the unit file
# (use MemoryLimit= on older systemd versions)
MemoryMax=1G

Next Steps