added cargo files

This commit is contained in:
2026-03-03 10:57:43 -05:00
parent 478a90e01b
commit 169df46bc2
813 changed files with 227273 additions and 9 deletions

View File

@@ -0,0 +1,24 @@
#!/bin/bash
# One-shot startup job: waits for the app, reads the background_tasks API key
# (UserID 1) straight from the database, then POSTs to the startup-tasks
# endpoint on the local API. Output is appended to /cron.log.
#
# Env: DB_TYPE, DB_HOST, DB_PORT, DB_USER, DB_PASSWORD, DB_NAME
# (defaulted below, matching the sibling cron scripts).

# Ensure app has time to start
sleep 10

echo "Getting background tasks API key..."

# Fix: default the connection parameters like the other task scripts do.
# Previously an unset DB_TYPE silently fell through to the MySQL branch
# and every DB_* variable had to be provided externally.
DB_TYPE=${DB_TYPE:-postgresql}
DB_HOST=${DB_HOST:-127.0.0.1}
DB_PORT=${DB_PORT:-5432}
DB_USER=${DB_USER:-postgres}
DB_PASSWORD=${DB_PASSWORD:-password}
DB_NAME=${DB_NAME:-pinepods_database}

# Get API key from database for background_tasks user (UserID = 1)
if [ "$DB_TYPE" = "postgresql" ]; then
    API_KEY=$(PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -t -c 'SELECT apikey FROM "APIKeys" WHERE userid = 1 LIMIT 1;' 2>/dev/null | xargs)
else
    API_KEY=$(mysql -h "$DB_HOST" -P "$DB_PORT" -u "$DB_USER" -p"$DB_PASSWORD" "$DB_NAME" -se 'SELECT APIKey FROM APIKeys WHERE UserID = 1 LIMIT 1;' 2>/dev/null)
fi

if [ -z "$API_KEY" ]; then
    echo "Error: Could not retrieve API key for background tasks"
    exit 1
fi

# Initialize application tasks
echo "Initializing application tasks..."
curl -X POST "http://localhost:8032/api/init/startup_tasks" \
    -H "Content-Type: application/json" \
    -d "{\"api_key\": \"$API_KEY\"}" >> /cron.log 2>&1

View File

@@ -0,0 +1,31 @@
#!/bin/bash
# Nightly maintenance runner: waits for the app to come up, pulls the
# background_tasks API key (UserID 1) out of the database, then triggers
# the nightly endpoints. All curl output is appended to /cron.log.

# Give the application time to start before talking to it.
sleep 10

# Get API key directly from database for background_tasks user (UserID 1)
echo "Getting background tasks API key..."

# Database connection parameters (standard container defaults).
: "${DB_TYPE:=postgresql}"
: "${DB_HOST:=127.0.0.1}"
: "${DB_PORT:=5432}"
: "${DB_USER:=postgres}"
: "${DB_PASSWORD:=password}"
: "${DB_NAME:=pinepods_database}"

# Read the key for UserID 1; query errors are discarded and an empty
# result is treated as failure below.
case "$DB_TYPE" in
    postgresql)
        API_KEY=$(PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -t -c 'SELECT apikey FROM "APIKeys" WHERE userid = 1 LIMIT 1;' 2>/dev/null | xargs)
        ;;
    *)
        API_KEY=$(mysql -h "$DB_HOST" -P "$DB_PORT" -u "$DB_USER" -p"$DB_PASSWORD" "$DB_NAME" -se 'SELECT APIKey FROM APIKeys WHERE UserID = 1 LIMIT 1;' 2>/dev/null)
        ;;
esac

if [ -z "$API_KEY" ]; then
    echo "Failed to get background tasks API key from database" >> /cron.log 2>&1
    exit 1
fi

# Call the FastAPI endpoint using the API key
# Run cleanup tasks
echo "Running nightly tasks..."
curl -X GET "http://localhost:8032/api/data/refresh_hosts" -H "Api-Key: $API_KEY" >> /cron.log 2>&1
curl -X GET "http://localhost:8032/api/data/auto_complete_episodes" -H "Api-Key: $API_KEY" >> /cron.log 2>&1

View File

@@ -0,0 +1,41 @@
#!/bin/bash
# Periodic refresh job: fetches the background_tasks API key (UserID 1)
# from the database, then triggers feed refresh, Nextcloud subscription
# sync, cleanup, and playlist updates on the local API.
# All endpoint output is appended to /cron.log.

# Ensure app has time to start
sleep 10

# Get API key directly from database for background_tasks user (UserID 1)
echo "Getting background tasks API key..."

# Database connection parameters
DB_TYPE=${DB_TYPE:-postgresql}
DB_HOST=${DB_HOST:-127.0.0.1}
DB_PORT=${DB_PORT:-5432}
DB_USER=${DB_USER:-postgres}
DB_PASSWORD=${DB_PASSWORD:-password}
DB_NAME=${DB_NAME:-pinepods_database}

# Query the APIKeys table; DB errors are discarded and an empty result
# is treated as failure below.
if [ "$DB_TYPE" = "postgresql" ]; then
    API_KEY=$(PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -t -c 'SELECT apikey FROM "APIKeys" WHERE userid = 1 LIMIT 1;' 2>/dev/null | xargs)
else
    API_KEY=$(mysql -h "$DB_HOST" -P "$DB_PORT" -u "$DB_USER" -p"$DB_PASSWORD" "$DB_NAME" -se 'SELECT APIKey FROM APIKeys WHERE UserID = 1 LIMIT 1;' 2>/dev/null)
fi

if [ -z "$API_KEY" ]; then
    echo "Failed to get background tasks API key from database" >> /cron.log 2>&1
    exit 1
fi

# Call the FastAPI endpoint using the API key
echo "Refreshing now!"
curl "http://localhost:8032/api/data/refresh_pods" -H "Api-Key: $API_KEY" >> /cron.log 2>&1

echo "Refreshing Nextcloud Subscription now!"
curl -X GET -H "Api-Key: $API_KEY" http://localhost:8032/api/data/refresh_nextcloud_subscriptions >> /cron.log 2>&1

# Run cleanup tasks
echo "Running cleanup tasks..."
curl -X GET "http://localhost:8032/api/data/cleanup_tasks" -H "Api-Key: $API_KEY" >> /cron.log 2>&1

# Refresh Playlists
echo "Refreshing Playlists..."
curl -X GET "http://localhost:8032/api/data/update_playlists" -H "Api-Key: $API_KEY" >> /cron.log 2>&1

View File

@@ -0,0 +1,28 @@
# Python logging.config.fileConfig configuration (quiet/production variant):
# both the root logger and "simpleExample" log at ERROR level only,
# emitted to stdout through a single StreamHandler.

[loggers]
keys=root,simpleExample

[handlers]
keys=consoleHandler

[formatters]
keys=simpleFormatter

[logger_root]
level=ERROR
handlers=consoleHandler

[logger_simpleExample]
level=ERROR
handlers=consoleHandler
qualname=simpleExample
# propagate=0 keeps simpleExample records from also reaching the root logger
propagate=0

[handler_consoleHandler]
class=StreamHandler
level=ERROR
formatter=simpleFormatter
args=(sys.stdout,)

[formatter_simpleFormatter]
format=[%(asctime)s] [%(levelname)s] - %(name)s: %(message)s
datefmt=%Y-%m-%d %H:%M:%S

View File

@@ -0,0 +1,28 @@
# Python logging.config.fileConfig configuration (verbose/debug variant):
# identical layout to the ERROR variant, but logs at INFO level.

[loggers]
keys=root,simpleExample

[handlers]
keys=consoleHandler

[formatters]
keys=simpleFormatter

[logger_root]
level=INFO
handlers=consoleHandler

[logger_simpleExample]
level=INFO
handlers=consoleHandler
qualname=simpleExample
# propagate=0 keeps simpleExample records from also reaching the root logger
propagate=0

[handler_consoleHandler]
class=StreamHandler
level=INFO
formatter=simpleFormatter
args=(sys.stdout,)

[formatter_simpleFormatter]
format=[%(asctime)s] [%(levelname)s] - %(name)s: %(message)s
datefmt=%Y-%m-%d %H:%M:%S

View File

@@ -0,0 +1,193 @@
# nginx front end for PinePods: serves the static web client on port 8040 and
# reverse-proxies API / gpodder / WebSocket traffic to the backend services
# (main API on localhost:8032, Go gpodder API on 127.0.0.1:8042).
events {}

http {
    include mime.types;
    default_type application/octet-stream;
    # No request body size limit (e.g. large backup/restore uploads).
    client_max_body_size 0;

    server {
        listen 8040;
        root /var/www/html;
        index index.html;

        # Add CORS headers to all responses
        # NOTE(review): nginx does not inherit add_header into a location that
        # declares its own add_header directives — which is why the /api and
        # gpodder locations below repeat these headers.
        add_header 'Access-Control-Allow-Origin' '*' always;
        add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS, DELETE, PUT' always;
        add_header 'Access-Control-Allow-Headers' 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Api-Key,Authorization' always;
        add_header 'Access-Control-Expose-Headers' 'Content-Length,Content-Range' always;

        # Public RSS feeds, rewritten onto the backend feed endpoint.
        location /rss/ {
            # Rewrite /rss/123 to /api/feed/123
            # NOTE(review): for two-segment paths (/rss/1/2) this yields
            # /api/feed/$1$2 with the two captures concatenated and no
            # separator between them — confirm that target format is intended.
            rewrite ^/rss/(\d+)(?:/(\d+))?$ /api/feed/$1$2 last;
            proxy_pass http://localhost:8032;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header Api-Key $arg_api_key; # Pass the api_key query param as a header
            # RSS-specific headers
            add_header Content-Type "application/rss+xml; charset=utf-8";
            expires 1h;
            add_header Cache-Control "public, no-transform";
        }

        # Static single-page app; unknown paths fall back to index.html.
        location / {
            # Handle OPTIONS requests for CORS preflight
            if ($request_method = 'OPTIONS') {
                add_header 'Access-Control-Max-Age' 1728000;
                add_header 'Content-Type' 'text/plain; charset=utf-8';
                add_header 'Content-Length' 0;
                return 204;
            }
            try_files $uri $uri/ /index.html;
        }

        # Main backend API.
        location /api {
            # Add CORS headers for /api responses
            add_header 'Access-Control-Allow-Origin' '*' always;
            add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS, DELETE, PUT' always;
            add_header 'Access-Control-Allow-Headers' 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Api-Key,Authorization' always;
            add_header 'Access-Control-Expose-Headers' 'Content-Length,Content-Range' always;
            proxy_pass http://localhost:8032;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            # CORS preflight short-circuit.
            if ($request_method = 'OPTIONS') {
                add_header 'Access-Control-Allow-Origin' '*' always;
                add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS, DELETE, PUT' always;
                add_header 'Access-Control-Allow-Headers' 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Api-Key,Authorization' always;
                add_header 'Access-Control-Max-Age' 1728000;
                add_header 'Content-Type' 'text/plain; charset=utf-8';
                add_header 'Content-Length' 0;
                return 204;
            }
        }

        # Route all gpodder API requests to the Go service
        # NOTE(review): as a regex location this takes precedence over the
        # /api prefix location above for URIs it matches (e.g. /api/2/...).
        location ~ ^/(api/2|auth|subscriptions|devices|updates|episodes|settings|lists|favorites|sync-devices|search|suggestions|toplist|tag|tags|data)/ {
            proxy_pass http://127.0.0.1:8042;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            # Add CORS headers
            add_header 'Access-Control-Allow-Origin' '*' always;
            add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS, DELETE, PUT' always;
            add_header 'Access-Control-Allow-Headers' 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Api-Key,Authorization' always;
            # Handle OPTIONS requests
            if ($request_method = 'OPTIONS') {
                add_header 'Access-Control-Allow-Origin' '*' always;
                add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS, DELETE, PUT' always;
                add_header 'Access-Control-Allow-Headers' 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Api-Key,Authorization' always;
                add_header 'Access-Control-Max-Age' 1728000;
                add_header 'Content-Type' 'text/plain; charset=utf-8';
                add_header 'Content-Length' 0;
                return 204;
            }
            # Increase timeouts for longer operations
            proxy_read_timeout 300;
            proxy_send_timeout 300;
        }

        # Special route for gpodder.net protocol support
        location /api/gpodder {
            # Add CORS headers for /api/gpodder responses
            add_header 'Access-Control-Allow-Origin' '*' always;
            add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS, DELETE, PUT' always;
            add_header 'Access-Control-Allow-Headers' 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Api-Key,Authorization' always;
            add_header 'Access-Control-Expose-Headers' 'Content-Length,Content-Range' always;
            proxy_pass http://localhost:8032;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            # Increase timeouts for potentially longer operations
            proxy_read_timeout 300;
            proxy_send_timeout 300;
            if ($request_method = 'OPTIONS') {
                add_header 'Access-Control-Allow-Origin' '*' always;
                add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS, DELETE, PUT' always;
                add_header 'Access-Control-Allow-Headers' 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Api-Key,Authorization' always;
                add_header 'Access-Control-Max-Age' 1728000;
                add_header 'Content-Type' 'text/plain; charset=utf-8';
                add_header 'Content-Length' 0;
                return 204;
            }
        }

        location /ws/api/data/ {
            proxy_pass http://localhost:8032; # Pass the WebSocket connection to your backend
            # WebSocket headers
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "Upgrade";
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            # Optionally increase the timeout values for long-running WebSocket connections
            proxy_read_timeout 86400;
            proxy_send_timeout 86400;
        }

        location /ws/api/tasks/ {
            proxy_pass http://localhost:8032; # Pass the WebSocket connection to your backend
            # WebSocket headers
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "Upgrade";
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            # Optionally increase the timeout values for long-running WebSocket connections
            proxy_read_timeout 86400;
            proxy_send_timeout 86400;
        }

        # location = /api/data/restore_server {
        #     client_max_body_size 0;
        #     proxy_pass http://localhost:8032;
        #     proxy_set_header Host $host;
        #     proxy_set_header X-Real-IP $remote_addr;
        #     proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        #     proxy_set_header X-Forwarded-Proto $scheme;
        #     # You may not need the CORS headers specifically for this endpoint
        #     # unless you're expecting to call it directly from client-side JavaScript
        #     # in a browser. If it's called server-side or from a tool like Postman,
        #     # these CORS headers might be unnecessary. Adjust as needed.
        #     add_header 'Access-Control-Allow-Origin' '*' always;
        #     add_header 'Access-Control-Allow-Methods' 'POST' always;
        #     add_header 'Access-Control-Allow-Headers' 'Content-Type, Api-Key' always;
        # }

        # Correct MIME type for WebAssembly files
        location ~* \.wasm$ {
            types {
                application/wasm wasm;
            }
        }
    }
}

View File

@@ -0,0 +1,17 @@
# Horust service definition: Go gpodder-compatible API.
# Starts after the main Rust API so database setup has already run.
command = "/usr/local/bin/gpodder-api"
start-after = ["pinepods-api.toml"]
stdout = "${HORUST_STDOUT_MODE}"
stderr = "${HORUST_STDERR_MODE}"

[restart]
# Always restart, with no attempt cap (attempts = 0).
strategy = "always"
backoff = "1s"
attempts = 0

[environment]
keep-env = true
# Forward the DB connection settings explicitly and pin the service port.
additional = { DB_USER = "${DB_USER}", DB_HOST = "${DB_HOST}", DB_PORT = "${DB_PORT}", DB_NAME = "${DB_NAME}", DB_PASSWORD = "${DB_PASSWORD}", SERVER_PORT = "8042" }

[termination]
signal = "TERM"
wait = "10s"

View File

@@ -0,0 +1,16 @@
# Horust service definition: nginx front end (runs in the foreground).
# Starts after the gpodder API so both upstreams exist when traffic arrives.
command = "nginx -g 'daemon off;'"
start-after = ["gpodder-api.toml"]
stdout = "${HORUST_STDOUT_MODE}"
stderr = "${HORUST_STDERR_MODE}"

[restart]
# Always restart, with no attempt cap (attempts = 0).
strategy = "always"
backoff = "1s"
attempts = 0

[environment]
keep-env = true

[termination]
signal = "TERM"
wait = "5s"

View File

@@ -0,0 +1,16 @@
# Horust service definition: main PinePods (Rust) API.
# First service in the chain — the gpodder API and nginx start after it.
command = "/usr/local/bin/pinepods-api"
stdout = "${HORUST_STDOUT_MODE}"
stderr = "${HORUST_STDERR_MODE}"

[restart]
# Always restart, with no attempt cap (attempts = 0).
strategy = "always"
backoff = "1s"
attempts = 0

[environment]
keep-env = true
# Forward the full application configuration explicitly from the container env.
additional = { DB_USER = "${DB_USER}", DB_PASSWORD = "${DB_PASSWORD}", DB_HOST = "${DB_HOST}", DB_NAME = "${DB_NAME}", DB_PORT = "${DB_PORT}", DB_TYPE = "${DB_TYPE}", FULLNAME = "${FULLNAME}", USERNAME = "${USERNAME}", EMAIL = "${EMAIL}", PASSWORD = "${PASSWORD}", REVERSE_PROXY = "${REVERSE_PROXY}", SEARCH_API_URL = "${SEARCH_API_URL}", PEOPLE_API_URL = "${PEOPLE_API_URL}", PINEPODS_PORT = "${PINEPODS_PORT}", PROXY_PROTOCOL = "${PROXY_PROTOCOL}", DEBUG_MODE = "${DEBUG_MODE}", VALKEY_HOST = "${VALKEY_HOST}", VALKEY_PORT = "${VALKEY_PORT}" }

[termination]
signal = "TERM"
wait = "5s"

View File

@@ -0,0 +1,257 @@
#!/usr/bin/env python3
"""
New Idempotent Database Setup for PinePods
This script replaces the old setupdatabase.py and setuppostgresdatabase.py
with a proper migration-based system that is fully idempotent.
"""
import os
import sys
import logging
from pathlib import Path

# Set up basic configuration for logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Add pinepods directory to sys.path for module import
# (parent-of-parent of this file, i.e. the project root when run from startup/)
pinepods_path = Path(__file__).parent.parent
sys.path.insert(0, str(pinepods_path))
sys.path.insert(0, '/pinepods')  # Also add the container path for Docker
def wait_for_postgresql_ready():
    """Wait for PostgreSQL to be ready to accept connections (not just port open).

    Connection settings come from the DB_HOST / DB_PORT / DB_USER /
    DB_PASSWORD environment variables; polls roughly once per second.

    Returns:
        bool: True once a ``SELECT 1`` round-trip succeeds, False if the
        server never became ready within the attempt budget.
    """
    import time
    import psycopg

    host = os.environ.get("DB_HOST", "127.0.0.1")
    port = os.environ.get("DB_PORT", "5432")
    user = os.environ.get("DB_USER", "postgres")
    password = os.environ.get("DB_PASSWORD", "password")

    max_attempts = 30  # 30 seconds
    logger.info(f"Waiting for PostgreSQL at {host}:{port} to be ready...")

    for attempt in range(1, max_attempts + 1):
        try:
            # Connect to the default 'postgres' database and verify the
            # server actually answers queries, not merely that the port is open.
            with psycopg.connect(
                host=host,
                port=port,
                user=user,
                password=password,
                dbname='postgres',
                connect_timeout=3
            ) as conn:
                with conn.cursor() as cur:
                    cur.execute("SELECT 1")
                    cur.fetchone()
                logger.info(f"PostgreSQL is ready after {attempt} attempts")
                return True
        except Exception as e:
            # Distinguish "still starting up" from other connection failures.
            message = str(e)
            if "not yet accepting connections" in message or "recovery" in message.lower():
                logger.info(f"PostgreSQL not ready yet (attempt {attempt}/{max_attempts}): {e}")
            else:
                logger.warning(f"Connection attempt {attempt}/{max_attempts} failed: {e}")
            if attempt < max_attempts:
                time.sleep(1)

    logger.error(f"PostgreSQL failed to become ready after {max_attempts} attempts")
    return False
def wait_for_mysql_ready():
    """Wait for MySQL/MariaDB to be ready to accept connections.

    Connection settings come from the DB_HOST / DB_PORT / DB_USER /
    DB_PASSWORD environment variables; polls roughly once per second.

    Returns:
        bool: True once a ``SELECT 1`` round-trip succeeds, False if the
        server never became ready within the attempt budget.
    """
    import time
    try:
        import mariadb as mysql_connector
    except ImportError:
        # Bug fix: the fallback was a plain `import mysql.connector`, which
        # binds the name `mysql` (not `mysql_connector`) and caused a
        # NameError at connect() time whenever mariadb was unavailable.
        import mysql.connector as mysql_connector

    db_host = os.environ.get("DB_HOST", "127.0.0.1")
    db_port = int(os.environ.get("DB_PORT", "3306"))
    db_user = os.environ.get("DB_USER", "root")
    db_password = os.environ.get("DB_PASSWORD", "password")

    max_attempts = 30  # 30 seconds
    attempt = 1
    logger.info(f"Waiting for MySQL/MariaDB at {db_host}:{db_port} to be ready...")
    while attempt <= max_attempts:
        try:
            # Try to connect to MySQL/MariaDB
            conn = mysql_connector.connect(
                host=db_host,
                port=db_port,
                user=db_user,
                password=db_password,
                connect_timeout=3,
                autocommit=True
            )
            cursor = conn.cursor()
            # Test if MySQL is ready to accept queries
            cursor.execute("SELECT 1")
            cursor.fetchone()
            cursor.close()
            conn.close()
            logger.info(f"MySQL/MariaDB is ready after {attempt} attempts")
            return True
        except Exception as e:
            logger.info(f"MySQL/MariaDB not ready yet (attempt {attempt}/{max_attempts}): {e}")
            if attempt < max_attempts:
                time.sleep(1)
            attempt += 1

    logger.error(f"MySQL/MariaDB failed to become ready after {max_attempts} attempts")
    return False
def create_database_if_not_exists():
    """Create the database if it doesn't exist and wait for the server to be ready.

    For MySQL/MariaDB the database itself is created by the container, so
    this only waits for readiness. For PostgreSQL it waits for readiness,
    then creates DB_NAME (from the environment) if it is missing.

    Raises:
        Exception: if the server never becomes ready, psycopg is missing,
        or database creation fails.
    """
    db_type = os.environ.get("DB_TYPE", "postgresql").lower()

    if db_type not in ('postgresql', 'postgres'):
        # Wait for MySQL/MariaDB to be ready; creation is handled by the container.
        if not wait_for_mysql_ready():
            raise Exception("MySQL/MariaDB did not become ready in time")
        logger.info("MySQL/MariaDB is ready (database creation handled by container)")
        return

    # PostgreSQL path. Fix: the readiness wait was previously performed twice
    # back-to-back; a single wait is sufficient.
    if not wait_for_postgresql_ready():
        raise Exception("PostgreSQL did not become ready in time")

    try:
        import psycopg
        from psycopg import sql

        # Database connection parameters
        db_host = os.environ.get("DB_HOST", "127.0.0.1")
        db_port = os.environ.get("DB_PORT", "5432")
        db_user = os.environ.get("DB_USER", "postgres")
        db_password = os.environ.get("DB_PASSWORD", "password")
        db_name = os.environ.get("DB_NAME", "pinepods_database")

        # Connect to the default 'postgres' database to check/create target database
        with psycopg.connect(
            host=db_host,
            port=db_port,
            user=db_user,
            password=db_password,
            dbname='postgres'
        ) as conn:
            # CREATE DATABASE cannot run inside a transaction block.
            conn.autocommit = True
            with conn.cursor() as cur:
                # Check if the database exists
                cur.execute("SELECT 1 FROM pg_database WHERE datname = %s", (db_name,))
                if cur.fetchone() is None:
                    logger.info(f"Database {db_name} does not exist. Creating...")
                    # Fix: quote the identifier via psycopg's sql composition
                    # instead of f-string interpolation, so an env-derived
                    # DB_NAME cannot inject SQL or break on odd characters.
                    cur.execute(
                        sql.SQL("CREATE DATABASE {}").format(sql.Identifier(db_name))
                    )
                    logger.info(f"Database {db_name} created successfully.")
                else:
                    logger.info(f"Database {db_name} already exists.")
    except ImportError:
        logger.error("psycopg not available for PostgreSQL database creation")
        raise
    except Exception as e:
        logger.error(f"Error creating database: {e}")
        raise
def ensure_usernames_lowercase():
    """Normalize every Username in the Users table to lowercase.

    Best-effort: any failure is logged and swallowed so the overall setup
    sequence can continue.
    """
    try:
        from database_functions.migrations import get_migration_manager

        manager = get_migration_manager()
        conn = manager.get_connection()
        cursor = conn.cursor()
        # PostgreSQL preserves the mixed-case table name only when quoted.
        users_table = '"Users"' if manager.db_type == 'postgresql' else 'Users'
        try:
            cursor.execute(f'SELECT UserID, Username FROM {users_table}')
            for user_id, username in cursor.fetchall():
                if not username:
                    continue
                lowered = username.lower()
                if lowered == username:
                    continue
                cursor.execute(
                    f'UPDATE {users_table} SET Username = %s WHERE UserID = %s',
                    (lowered, user_id)
                )
                logger.info(f"Updated Username for UserID {user_id} to lowercase")
            conn.commit()
            logger.info("Username normalization completed")
        finally:
            cursor.close()
            manager.close_connection()
    except Exception as e:
        logger.error(f"Error normalizing usernames: {e}")
def ensure_web_api_key_file():
    """Deprecated: Web API key file removed for security reasons"""
    # Intentional no-op kept so existing call sites (see main()) keep working;
    # background tasks now read the API key from the database instead of a file.
    logger.info("Web API key file creation skipped - background tasks now authenticate via database")
def main():
    """Run the full idempotent database setup sequence.

    Steps: ensure the database exists, register and run all migrations,
    normalize usernames, and run the (now no-op) web API key file step.

    Returns:
        bool: True on success, False if any step failed.
    """
    try:
        logger.info("Starting PinePods database setup...")

        # Step 1: Create database if needed (PostgreSQL only)
        create_database_if_not_exists()

        # Step 2: Import and register all migrations
        logger.info("Loading migration definitions...")
        import database_functions.migration_definitions
        database_functions.migration_definitions.register_all_migrations()

        # Step 3: Run migrations
        logger.info("Running database migrations...")
        from database_functions.migrations import run_all_migrations
        if not run_all_migrations():
            logger.error("Database migrations failed!")
            return False

        # Step 4: Ensure username consistency
        logger.info("Ensuring username consistency...")
        ensure_usernames_lowercase()

        # Step 5: Ensure web API key file exists
        logger.info("Ensuring web API key file exists...")
        ensure_web_api_key_file()

        logger.info("Database setup completed successfully!")
        logger.info("Database validation complete")
        return True
    except Exception as e:
        logger.error(f"Database setup failed: {e}")
        return False


if __name__ == "__main__":
    sys.exit(0 if main() else 1)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

179
PinePods-0.8.2/startup/startup.sh Executable file
View File

@@ -0,0 +1,179 @@
#!/bin/bash
# PinePods container entrypoint: exports configuration into the environment,
# configures the timezone, prepares directories, validates/migrates the
# database, then hands off to Horust as the foreground supervisor.
set -e # Exit immediately if a command exits with a non-zero status

# Function to handle shutdown
shutdown() {
    echo "Shutting down..."
    pkill -TERM horust
    exit 0
}

# Set up signal handling
trap shutdown SIGTERM SIGINT

# Export all environment variables
export DB_USER=$DB_USER
export DB_PASSWORD=$DB_PASSWORD
export DB_HOST=$DB_HOST
export DB_NAME=$DB_NAME
export DB_PORT=$DB_PORT
export DB_TYPE=$DB_TYPE
export FULLNAME=${FULLNAME}
export USERNAME=${USERNAME}
export EMAIL=${EMAIL}
export PASSWORD=${PASSWORD}
export REVERSE_PROXY=$REVERSE_PROXY
export SEARCH_API_URL=$SEARCH_API_URL
export PEOPLE_API_URL=$PEOPLE_API_URL
export PINEPODS_PORT=$PINEPODS_PORT
export PROXY_PROTOCOL=$PROXY_PROTOCOL
export DEBUG_MODE=${DEBUG_MODE:-'False'}
export VALKEY_HOST=${VALKEY_HOST:-'valkey'}
export VALKEY_PORT=${VALKEY_PORT:-'6379'}
export DEFAULT_LANGUAGE=${DEFAULT_LANGUAGE:-'en'}

# Save user's HOSTNAME to SERVER_URL before Docker overwrites it with container ID
# This preserves the user-configured server URL for RSS feed generation
export SERVER_URL=${HOSTNAME}

# Export OIDC environment variables
export OIDC_DISABLE_STANDARD_LOGIN=${OIDC_DISABLE_STANDARD_LOGIN:-'false'}
export OIDC_PROVIDER_NAME=${OIDC_PROVIDER_NAME}
export OIDC_CLIENT_ID=${OIDC_CLIENT_ID}
export OIDC_CLIENT_SECRET=${OIDC_CLIENT_SECRET}
export OIDC_AUTHORIZATION_URL=${OIDC_AUTHORIZATION_URL}
export OIDC_TOKEN_URL=${OIDC_TOKEN_URL}
export OIDC_USER_INFO_URL=${OIDC_USER_INFO_URL}
export OIDC_BUTTON_TEXT=${OIDC_BUTTON_TEXT}
export OIDC_SCOPE=${OIDC_SCOPE}
export OIDC_BUTTON_COLOR=${OIDC_BUTTON_COLOR}
export OIDC_BUTTON_TEXT_COLOR=${OIDC_BUTTON_TEXT_COLOR}
export OIDC_ICON_SVG=${OIDC_ICON_SVG}
export OIDC_NAME_CLAIM=${OIDC_NAME_CLAIM}
export OIDC_EMAIL_CLAIM=${OIDC_EMAIL_CLAIM}
export OIDC_USERNAME_CLAIM=${OIDC_USERNAME_CLAIM}
export OIDC_ROLES_CLAIM=${OIDC_ROLES_CLAIM}
export OIDC_USER_ROLE=${OIDC_USER_ROLE}
export OIDC_ADMIN_ROLE=${OIDC_ADMIN_ROLE}

# Print admin info if default admin is used
if [[ $FULLNAME == 'Pinepods Admin' ]]; then
    echo "Admin User Information:"
    echo "FULLNAME: $FULLNAME"
    echo "USERNAME: $USERNAME"
    echo "EMAIL: $EMAIL"
    echo "PASSWORD: $PASSWORD"
fi

# Print PinePods logo
cat << "EOF"
A
d$b
.d\$$b.
.d$i$$\$$b. _______ ** **
d$$@b / \ / | / |
d\$$$ib $$$$$$$ |$$/ _______ ______ ______ ______ ____$$ | _______
.d$$$\$$$b $$ |__$$ |/ |/ \ / \ / \ / \ / $$ | / |
.d$$@$$$$\$$ib. $$ $$/ $$ |$$$$$$$ |/$$$$$$ |/$$$$$$ |/$$$$$$ |/$$$$$$$ |/$$$$$$$/
d$$i$$b $$$$$$$/ $$ |$$ | $$ |$$ $$ |$$ | $$ |$$ | $$ |$$ | $$ |$$ \
d\$$$$@$b. $$ | $$ |$$ | $$ |$$$$$$$$/ $$ |__$$ |$$ \__$$ |$$ \__$$ | $$$$$$ |
.d$@$$\$$$$$@b. $$ | $$ |$$ | $$ |$$ |$$ $$/ $$ $$/ $$ $$ |/ $$/
.d$$$$i$$$\$$$$$$b. $$/ $$/ $$/ $$/ $$$$$$$/ $$$$$$$/ $$$$$$/ $$$$$$$/ $$$$$$$/
### $$ |
### $$ |
### $$/
A project created and written by Collin Pendleton
collinp@gooseberrydevelopment.com
EOF

# Configure timezone based on TZ environment variable
if [ -n "$TZ" ]; then
    echo "Setting timezone to $TZ"
    # For Alpine, we need to copy the zoneinfo file
    if [ -f "/usr/share/zoneinfo/$TZ" ]; then
        # Check if /etc/localtime is a mounted volume
        if [ -f "/etc/localtime" ] && ! [ -L "/etc/localtime" ]; then
            echo "Using mounted timezone file from host"
        else
            # If it's not mounted or is a symlink, we can modify it
            # (fix: quote $TZ so values containing spaces cannot word-split)
            cp "/usr/share/zoneinfo/$TZ" /etc/localtime
            echo "$TZ" > /etc/timezone
        fi
    else
        echo "Timezone $TZ not found, using UTC"
        # Only modify if not mounted
        if ! [ -f "/etc/localtime" ] || [ -L "/etc/localtime" ]; then
            cp /usr/share/zoneinfo/UTC /etc/localtime
            echo "UTC" > /etc/timezone
        fi
    fi
else
    echo "No timezone specified, using UTC"
    # Only modify if not mounted
    if ! [ -f "/etc/localtime" ] || [ -L "/etc/localtime" ]; then
        cp /usr/share/zoneinfo/UTC /etc/localtime
        echo "UTC" > /etc/timezone
    fi
fi

# Export TZ to the environment for all child processes
export TZ

# Create required directories
echo "Creating required directories..."
mkdir -p /pinepods/cache
mkdir -p /opt/pinepods/backups
mkdir -p /opt/pinepods/downloads
mkdir -p /opt/pinepods/certs
mkdir -p /var/log/pinepods # Make sure log directory exists

# Database Setup
echo "Using $DB_TYPE database"
# Use compiled database setup binary (no Python dependency)
# Web API key file creation has been removed for security
/usr/local/bin/pinepods-db-setup
echo "Database validation complete"

# Cron jobs removed - now handled by internal Rust scheduler

# Check if we need to create exim directories
# Only do this if the user/group exists on the system
# (fix: query the group directly instead of substring-grepping all groups)
if getent group Debian-exim > /dev/null; then
    echo "Setting up exim directories and permissions..."
    mkdir -p /var/log/exim4
    mkdir -p /var/spool/exim4
    chown -R Debian-exim:Debian-exim /var/log/exim4
    chown -R Debian-exim:Debian-exim /var/spool/exim4
else
    echo "Skipping exim setup as user/group doesn't exist on this system"
fi

# Set up environment variables for Horust logging modes
if [[ $DEBUG_MODE == "true" ]]; then
    export HORUST_STDOUT_MODE="STDOUT"
    export HORUST_STDERR_MODE="STDERR"
    echo "Starting Horust in debug mode (logs to stdout)..."
else
    export HORUST_STDOUT_MODE="/var/log/pinepods/service.log"
    export HORUST_STDERR_MODE="/var/log/pinepods/service.log"
    echo "Starting Horust in production mode (logs to files)..."
fi

# Set permissions for download and backup directories BEFORE starting services
# Only do this if PUID and PGID are set
if [[ -n "$PUID" && -n "$PGID" ]]; then
    echo "Setting permissions for download and backup directories...(Be patient this might take a while if you have a lot of downloads)"
    # fix: quote the owner:group expansion
    chown -R "${PUID}:${PGID}" /opt/pinepods/downloads
    chown -R "${PUID}:${PGID}" /opt/pinepods/backups
else
    echo "Skipping permission setting as PUID/PGID are not set"
fi

# Copy service configurations to Horust directory
cp /pinepods/startup/services/*.toml /etc/horust/services/

# Start all services with Horust
echo "Starting services with Horust..."
echo "PinePods startup complete, running Horust in foreground..."
exec horust --services-path /etc/horust/services/

View File

@@ -0,0 +1,40 @@
# Supervisor configuration (file-logging variant): runs the Rust client API,
# the Go gpodder API, and nginx under supervisord, logging each program to
# its own file under /var/log/supervisor.
[supervisord]
nodaemon=true
user=root
logfile=/var/log/supervisor/supervisord.log ; main log file
loglevel=info ; log level
# Add TZ to the global environment
environment=TZ="%(ENV_TZ)s"

# crond removed - background tasks now handled by internal Rust scheduler
# Legacy startup/refresh tasks removed - now handled by internal Rust scheduler
# The Rust API now handles all background tasks internally with tokio-cron-scheduler

[program:client_api]
command=/usr/local/bin/pinepods-api
redirect_stderr=true
stdout_logfile=/var/log/supervisor/client_api.log
stderr_logfile=/var/log/supervisor/client_api.log
stdout_logfile_maxbytes=10000
stopwaitsecs=5

[program:gpodder_api]
# DB_* settings are forwarded explicitly via supervisord's %(ENV_*)s expansion
# and the service port is pinned to 8042 before exec'ing the binary.
command=bash -c 'export DB_USER="%(ENV_DB_USER)s"; export DB_HOST="%(ENV_DB_HOST)s"; export DB_PORT="%(ENV_DB_PORT)s"; export DB_NAME="%(ENV_DB_NAME)s"; export DB_PASSWORD="%(ENV_DB_PASSWORD)s"; export SERVER_PORT=8042; /usr/local/bin/gpodder-api'
autostart=true
autorestart=true
redirect_stderr=true
stdout_logfile=/var/log/supervisor/gpodder_api.log
stderr_logfile=/var/log/supervisor/gpodder_api.log
stdout_logfile_maxbytes=10000
stopwaitsecs=10

[program:main_app]
command=nginx -g 'daemon off;'
redirect_stderr=true
stdout_logfile=/var/log/supervisor/nginx.log
stderr_logfile=/var/log/supervisor/nginx_error.log
stdout_logfile_maxbytes=10000
stopwaitsecs=5

View File

@@ -0,0 +1,39 @@
[supervisord]
nodaemon=true
user=root
logfile=/var/log/supervisor/supervisord.log ; main log file
loglevel=info ; log level
# Add TZ to the global environment
environment=TZ="%(ENV_TZ)s"
[program:crond]
command=crond -f ; Run cron in the foreground
autorestart=true
redirect_stderr=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
[program:client_api]
command=/usr/local/bin/pinepods-api
redirect_stderr=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
[program:gpodder_api]
command=bash -c 'export DB_USER="%(ENV_DB_USER)s"; export DB_HOST="%(ENV_DB_HOST)s"; export DB_PORT="%(ENV_DB_PORT)s"; export DB_NAME="%(ENV_DB_NAME)s"; export DB_PASSWORD="%(ENV_DB_PASSWORD)s"; export SERVER_PORT=8042; /usr/local/bin/gpodder-api'
autostart=true
autorestart=true
redirect_stderr=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
stopwaitsecs=10
[program:main_app]
command=nginx -g 'daemon off;'
redirect_stderr=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0