This commit is contained in:
2026-03-03 11:00:57 -05:00
parent a146a2b5c3
commit 108b714fcb
811 changed files with 2 additions and 218971 deletions

3
.gitignore vendored
View File

@@ -1 +1,2 @@
/PinePods-0.8.2
PinePods-0.8.2/

View File

@@ -1 +0,0 @@
web/target/*

View File

@@ -1,2 +0,0 @@
# Auto detect text files and perform LF normalization
* text=auto

View File

@@ -1,14 +0,0 @@
# These are supported funding model platforms
github: madeofpendletonwool
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
polar: # Replace with a single Polar username
buy_me_a_coffee: collinscoffee
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']

View File

@@ -1,16 +0,0 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
version: 2
updates:
- package-ecosystem: "cargo" # See documentation for possible values
directory: "/web" # Location of package manifests
schedule:
interval: "weekly"
- package-ecosystem: "pip" # See documentation for possible values
directory: "/" # Location of package manifests
schedule:
interval: "weekly"

View File

@@ -1,406 +0,0 @@
name: Database Backwards Compatibility Test
on:
push:
branches: [main]
pull_request:
branches: [main]
env:
TEST_DB_PASSWORD: "test_password_123!"
TEST_DB_NAME: "pinepods_test_db"
jobs:
test-mysql-compatibility:
runs-on: ubuntu-latest
services:
mysql:
image: mysql:latest
env:
MYSQL_ROOT_PASSWORD: test_password_123!
MYSQL_DATABASE: pinepods_test_db
ports:
- 3306:3306
options: >-
--health-cmd="mysqladmin ping"
--health-interval=10s
--health-timeout=5s
--health-retries=3
valkey:
image: valkey/valkey:8-alpine
ports:
- 6379:6379
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Get previous release tag
id: get_previous_tag
run: |
# Get the latest stable release (exclude rc, alpha, beta)
PREVIOUS_TAG=$(git tag --sort=-version:refname | grep -E '^[0-9]+\.[0-9]+\.[0-9]+$' | head -n 1)
if [ -z "$PREVIOUS_TAG" ]; then
echo "No stable release tag found, using 0.7.9 as baseline"
PREVIOUS_TAG="0.7.9"
fi
echo "previous_tag=$PREVIOUS_TAG" >> $GITHUB_OUTPUT
echo "Using previous tag: $PREVIOUS_TAG"
- name: Start previous PinePods version
run: |
echo "🚀 Starting PinePods ${{ steps.get_previous_tag.outputs.previous_tag }}"
# Create docker-compose for previous version
cat > docker-compose.previous.yml << EOF
version: '3.8'
services:
pinepods_previous:
image: madeofpendletonwool/pinepods:${{ steps.get_previous_tag.outputs.previous_tag }}
environment:
DB_TYPE: mysql
DB_HOST: mysql
DB_PORT: 3306
DB_USER: root
DB_PASSWORD: ${{ env.TEST_DB_PASSWORD }}
DB_NAME: ${{ env.TEST_DB_NAME }}
VALKEY_HOST: valkey
VALKEY_PORT: 6379
HOSTNAME: 'http://localhost:8040'
DEBUG_MODE: true
SEARCH_API_URL: 'https://search.pinepods.online/api/search'
PEOPLE_API_URL: 'https://people.pinepods.online'
ports:
- "8040:8040"
depends_on:
- mysql
- valkey
networks:
- test_network
mysql:
image: mysql:8.0
environment:
MYSQL_ROOT_PASSWORD: ${{ env.TEST_DB_PASSWORD }}
MYSQL_DATABASE: ${{ env.TEST_DB_NAME }}
networks:
- test_network
valkey:
image: valkey/valkey:8-alpine
networks:
- test_network
networks:
test_network:
driver: bridge
EOF
# Start previous version and wait for it to be ready
docker compose -f docker-compose.previous.yml up -d
# Wait for services to be ready
echo "⏳ Waiting for previous version to initialize..."
sleep 30
# Check if previous version is responding
timeout 60 bash -c 'while ! curl -f http://localhost:8040/api/pinepods_check; do sleep 5; done'
echo "✅ Previous version (${{ steps.get_previous_tag.outputs.previous_tag }}) is ready"
- name: Stop previous version
run: |
echo "🛑 Stopping previous PinePods version"
docker compose -f docker-compose.previous.yml stop pinepods_previous
echo "✅ Previous version stopped (database preserved)"
- name: Build current version
run: |
echo "🔨 Building current PinePods version from source"
docker build -f dockerfile -t pinepods-current:test .
echo "✅ Build complete"
- name: Start current version
run: |
# Create docker-compose for current version
cat > docker-compose.current.yml << EOF
version: '3.8'
services:
pinepods_current:
image: pinepods-current:test
environment:
DB_TYPE: mysql
DB_HOST: mysql
DB_PORT: 3306
DB_USER: root
DB_PASSWORD: ${{ env.TEST_DB_PASSWORD }}
DB_NAME: ${{ env.TEST_DB_NAME }}
VALKEY_HOST: valkey
VALKEY_PORT: 6379
HOSTNAME: 'http://localhost:8040'
DEBUG_MODE: true
SEARCH_API_URL: 'https://search.pinepods.online/api/search'
PEOPLE_API_URL: 'https://people.pinepods.online'
ports:
- "8040:8040"
depends_on:
- mysql
- valkey
networks:
- test_network
mysql:
image: mysql:8.0
environment:
MYSQL_ROOT_PASSWORD: ${{ env.TEST_DB_PASSWORD }}
MYSQL_DATABASE: ${{ env.TEST_DB_NAME }}
networks:
- test_network
valkey:
image: valkey/valkey:8-alpine
networks:
- test_network
networks:
test_network:
driver: bridge
EOF
echo "🚀 Starting current PinePods version"
# Start current version
docker compose -f docker-compose.current.yml up -d pinepods_current
# Wait for current version to be ready
echo "⏳ Waiting for current version to initialize..."
sleep 60
# Check if current version is responding
timeout 120 bash -c 'while ! curl -f http://localhost:8040/api/pinepods_check; do echo "Waiting for current version..."; sleep 10; done'
echo "✅ Current version is ready"
- name: Build validator and validate upgraded database
run: |
echo "🔨 Building database validator"
docker build -f Dockerfile.validator -t pinepods-validator .
echo "🔍 Validating upgraded database schema"
docker run --rm --network pinepods_test_network \
-e DB_TYPE=mysql \
-e DB_HOST=mysql \
-e DB_PORT=3306 \
-e DB_USER=root \
-e DB_PASSWORD=${{ env.TEST_DB_PASSWORD }} \
-e DB_NAME=${{ env.TEST_DB_NAME }} \
pinepods-validator
- name: Test basic functionality
run: |
echo "🧪 Testing basic API functionality"
# Test health endpoint
curl -f http://localhost:8040/api/health || exit 1
# Test pinepods check endpoint
curl -f http://localhost:8040/api/pinepods_check || exit 1
echo "✅ Basic functionality tests passed"
- name: Cleanup
if: always()
run: |
echo "🧹 Cleaning up test environment"
docker compose -f docker-compose.previous.yml down -v || true
docker compose -f docker-compose.current.yml down -v || true
test-postgresql-compatibility:
runs-on: ubuntu-latest
services:
postgres:
image: postgres:15
env:
POSTGRES_PASSWORD: test_password_123!
POSTGRES_DB: pinepods_test_db
ports:
- 5432:5432
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
valkey:
image: valkey/valkey:8-alpine
ports:
- 6379:6379
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Get previous release tag
id: get_previous_tag
run: |
# Get the latest stable release (exclude rc, alpha, beta)
PREVIOUS_TAG=$(git tag --sort=-version:refname | grep -E '^[0-9]+\.[0-9]+\.[0-9]+$' | head -n 1)
if [ -z "$PREVIOUS_TAG" ]; then
echo "No stable release tag found, using 0.7.9 as baseline"
PREVIOUS_TAG="0.7.9"
fi
echo "previous_tag=$PREVIOUS_TAG" >> $GITHUB_OUTPUT
echo "Using previous tag: $PREVIOUS_TAG"
- name: Start previous PinePods version
run: |
echo "🚀 Starting PinePods ${{ steps.get_previous_tag.outputs.previous_tag }} (PostgreSQL)"
cat > docker-compose.postgres-previous.yml << EOF
version: '3.8'
services:
pinepods_previous:
image: madeofpendletonwool/pinepods:${{ steps.get_previous_tag.outputs.previous_tag }}
environment:
DB_TYPE: postgresql
DB_HOST: postgres
DB_PORT: 5432
DB_USER: postgres
DB_PASSWORD: ${{ env.TEST_DB_PASSWORD }}
DB_NAME: ${{ env.TEST_DB_NAME }}
VALKEY_HOST: valkey
VALKEY_PORT: 6379
HOSTNAME: 'http://localhost:8040'
DEBUG_MODE: true
SEARCH_API_URL: 'https://search.pinepods.online/api/search'
PEOPLE_API_URL: 'https://people.pinepods.online'
ports:
- "8040:8040"
depends_on:
- postgres
- valkey
networks:
- test_network
postgres:
image: postgres:latest
environment:
POSTGRES_PASSWORD: ${{ env.TEST_DB_PASSWORD }}
POSTGRES_DB: ${{ env.TEST_DB_NAME }}
networks:
- test_network
valkey:
image: valkey/valkey:8-alpine
networks:
- test_network
networks:
test_network:
driver: bridge
EOF
docker compose -f docker-compose.postgres-previous.yml up -d
sleep 30
timeout 60 bash -c 'while ! curl -f http://localhost:8040/api/pinepods_check; do sleep 5; done'
- name: Stop previous version
run: |
echo "🛑 Stopping previous PinePods version"
docker compose -f docker-compose.postgres-previous.yml stop pinepods_previous
echo "✅ Previous version stopped (database preserved)"
- name: Build current version (PostgreSQL)
run: |
echo "🔨 Building current PinePods version from source"
docker build -f dockerfile -t pinepods-current:test .
echo "✅ Build complete"
- name: Test current version (PostgreSQL)
run: |
echo "🚀 Starting current PinePods version with PostgreSQL"
# Create docker-compose for current version
cat > docker-compose.postgres-current.yml << EOF
version: '3.8'
services:
pinepods_current:
image: pinepods-current:test
environment:
DB_TYPE: postgresql
DB_HOST: postgres
DB_PORT: 5432
DB_USER: postgres
DB_PASSWORD: ${{ env.TEST_DB_PASSWORD }}
DB_NAME: ${{ env.TEST_DB_NAME }}
VALKEY_HOST: valkey
VALKEY_PORT: 6379
HOSTNAME: 'http://localhost:8040'
DEBUG_MODE: true
SEARCH_API_URL: 'https://search.pinepods.online/api/search'
PEOPLE_API_URL: 'https://people.pinepods.online'
ports:
- "8040:8040"
depends_on:
- postgres
- valkey
networks:
- test_network
postgres:
image: postgres:latest
environment:
POSTGRES_PASSWORD: ${{ env.TEST_DB_PASSWORD }}
POSTGRES_DB: ${{ env.TEST_DB_NAME }}
networks:
- test_network
valkey:
image: valkey/valkey:8-alpine
networks:
- test_network
networks:
test_network:
driver: bridge
EOF
# Start current version
docker compose -f docker-compose.postgres-current.yml up -d pinepods_current
# Wait for current version to be ready
echo "⏳ Waiting for current version to initialize..."
sleep 60
# Check if current version is responding
timeout 120 bash -c 'while ! curl -f http://localhost:8040/api/pinepods_check; do echo "Waiting for current version..."; sleep 10; done'
echo "✅ Current version is ready"
- name: Build validator and validate upgraded database (PostgreSQL)
run: |
echo "🔨 Building PostgreSQL database validator"
docker build -f Dockerfile.validator.postgres -t pinepods-validator-postgres .
echo "🔍 Validating upgraded database schema"
docker run --rm --network pinepods_test_network \
-e DB_TYPE=postgresql \
-e DB_HOST=postgres \
-e DB_PORT=5432 \
-e DB_USER=postgres \
-e DB_PASSWORD=${{ env.TEST_DB_PASSWORD }} \
-e DB_NAME=${{ env.TEST_DB_NAME }} \
pinepods-validator-postgres
- name: Cleanup
if: always()
run: |
docker compose -f docker-compose.postgres-previous.yml down -v || true
docker compose -f docker-compose.postgres-current.yml down -v || true

View File

@@ -1,115 +0,0 @@
permissions:
contents: write
name: Build Android Flutter App
on:
push:
tags:
- "*"
release:
types: [published]
workflow_dispatch:
inputs:
version:
description: "Manual override version tag (optional)"
required: false
jobs:
build:
name: Build Android Release
runs-on: ubuntu-latest
steps:
- name: Set Image Tag
run: echo "IMAGE_TAG=${{ github.event.release.tag_name || github.event.inputs.version || 'latest' }}" >> $GITHUB_ENV
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0 # Fetch full git history for accurate commit count
token: ${{ secrets.GITHUB_TOKEN }}
- name: Set up JDK 17
uses: actions/setup-java@v4
with:
java-version: "17"
distribution: "temurin"
- name: Setup Android SDK
uses: android-actions/setup-android@v3
- name: Setup Flutter
uses: subosito/flutter-action@v2
with:
flutter-version: "3.35.2"
channel: "stable"
- name: Install dependencies
run: |
cd mobile
flutter pub get
- name: Setup Android signing
run: |
cd mobile/android
echo "storePassword=${{ secrets.ANDROID_STORE_PASSWORD }}" > key.properties
echo "keyPassword=${{ secrets.ANDROID_KEY_PASSWORD }}" >> key.properties
echo "keyAlias=${{ secrets.ANDROID_KEY_ALIAS }}" >> key.properties
echo "storeFile=../upload-keystore.jks" >> key.properties
echo "${{ secrets.ANDROID_KEYSTORE_BASE64 }}" | base64 -d > upload-keystore.jks
- name: Verify version files
run: |
cd mobile
echo "Current version in pubspec.yaml:"
grep "^version:" pubspec.yaml
echo "Current version in environment.dart:"
grep "_projectVersion\|_build" lib/core/environment.dart
echo "Build will use versions exactly as they are in the repository"
- name: Build APK
run: |
cd mobile
flutter build apk --release --split-per-abi
- name: Build AAB
run: |
cd mobile
flutter build appbundle --release
- name: Rename APK files
run: |
cd mobile/build/app/outputs/flutter-apk
# Extract version from IMAGE_TAG (remove 'v' prefix if present)
VERSION=${IMAGE_TAG#v}
if [[ "$VERSION" == "latest" ]]; then
VERSION="0.0.0"
fi
# Rename APK files with proper naming convention
mv app-armeabi-v7a-release.apk pinepods-armeabi-${VERSION}.apk
mv app-arm64-v8a-release.apk pinepods-arm64-${VERSION}.apk
mv app-x86_64-release.apk pinepods-x86_64-${VERSION}.apk
- name: Upload APK artifacts
uses: actions/upload-artifact@v4
with:
name: android-apk-builds
path: mobile/build/app/outputs/flutter-apk/pinepods-*.apk
- name: Upload AAB artifact
uses: actions/upload-artifact@v4
with:
name: android-aab-build
path: mobile/build/app/outputs/bundle/release/app-release.aab
# - name: Upload to Google Play Store
# if: github.event_name == 'release'
# env:
# GOOGLE_PLAY_SERVICE_ACCOUNT_JSON: ${{ secrets.GOOGLE_PLAY_SERVICE_ACCOUNT_JSON }}
# run: |
# echo "$GOOGLE_PLAY_SERVICE_ACCOUNT_JSON" > service-account.json
# # Install fastlane if needed for Play Store upload
# # gem install fastlane
# # fastlane supply --aab mobile/build/app/outputs/bundle/release/app-release.aab --json_key service-account.json --package_name com.gooseberrydevelopment.pinepods --track production

View File

@@ -1,124 +0,0 @@
name: Build Pinepods Flatpak
on:
workflow_run:
workflows: ["Build Tauri Clients"]
types:
- completed
workflow_dispatch:
inputs:
version:
description: "Version to build (for testing)"
required: true
default: "test"
env:
FLATPAK_ID: com.gooseberrydevelopment.pinepods
jobs:
build-flatpak:
runs-on: ubuntu-latest
if: ${{ github.event.workflow_run.conclusion == 'success' || github.event_name == 'workflow_dispatch' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install Flatpak
run: |
sudo apt-get update
sudo apt-get install -y flatpak flatpak-builder appstream
- name: Install Flatpak SDK
run: |
flatpak remote-add --user --if-not-exists flathub https://flathub.org/repo/flathub.flatpakrepo
flatpak install --user -y flathub org.gnome.Platform//47 org.gnome.Sdk//47
- name: Clone Flathub repo
run: |
git clone https://github.com/flathub/com.gooseberrydevelopment.pinepods flathub-repo
cp flathub-repo/com.gooseberrydevelopment.pinepods.yml .
- name: Set VERSION variable
run: |
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
echo "VERSION=${{ github.event.inputs.version }}" >> $GITHUB_ENV
else
LATEST_RELEASE=$(curl -s https://api.github.com/repos/${{ github.repository }}/releases/latest | jq -r .tag_name)
echo "VERSION=$LATEST_RELEASE" >> $GITHUB_ENV
fi
- name: Download DEBs and calculate checksums
run: |
# Download both DEBs
curl -L "https://github.com/${{ github.repository }}/releases/download/$VERSION/Pinepods_${VERSION}_amd64.deb" -o amd64.deb
curl -L "https://github.com/${{ github.repository }}/releases/download/$VERSION/Pinepods_${VERSION}_arm64.deb" -o arm64.deb
# Calculate and display checksums
AMD64_SHA256=$(sha256sum amd64.deb | cut -d' ' -f1)
ARM64_SHA256=$(sha256sum arm64.deb | cut -d' ' -f1)
echo "Calculated AMD64 SHA256: $AMD64_SHA256"
echo "Calculated ARM64 SHA256: $ARM64_SHA256"
# Export to environment
echo "AMD64_SHA256=$AMD64_SHA256" >> $GITHUB_ENV
echo "ARM64_SHA256=$ARM64_SHA256" >> $GITHUB_ENV
- name: Update manifest version and URL
run: |
echo "Updating manifest for version: $VERSION"
# Show environment variables
echo "Using AMD64 SHA256: $AMD64_SHA256"
echo "Using ARM64 SHA256: $ARM64_SHA256"
# Update AMD64 entry first
sed -i "/.*amd64.deb/,/sha256:/ s|sha256: .*|sha256: $AMD64_SHA256|" com.gooseberrydevelopment.pinepods.yml
# Update ARM64 entry second
sed -i "/.*arm64.deb/,/sha256:/ s|sha256: .*|sha256: $ARM64_SHA256|" com.gooseberrydevelopment.pinepods.yml
# Update URLs
sed -i "s|url: .*amd64.deb|url: https://github.com/${{ github.repository }}/releases/download/$VERSION/Pinepods_${VERSION}_amd64.deb|" com.gooseberrydevelopment.pinepods.yml
sed -i "s|url: .*arm64.deb|url: https://github.com/${{ github.repository }}/releases/download/$VERSION/Pinepods_${VERSION}_arm64.deb|" com.gooseberrydevelopment.pinepods.yml
echo "Updated manifest content:"
cat com.gooseberrydevelopment.pinepods.yml
- name: Get shared Modules
run: |
git clone https://github.com/flathub/shared-modules
# Test build steps
- name: Build and test Flatpak
run: |
flatpak-builder --force-clean --sandbox --user --install-deps-from=flathub --ccache \
--mirror-screenshots-url=https://dl.flathub.org/media/ --repo=repo builddir \
com.gooseberrydevelopment.pinepods.yml
flatpak remote-add --user --no-gpg-verify test-repo "$(pwd)/repo"
flatpak install --user -y test-repo ${{ env.FLATPAK_ID }}
# Basic launch test (timeout after 30s)
timeout 30s flatpak run ${{ env.FLATPAK_ID }} || true
# Verify metainfo
flatpak run --command=cat ${{ env.FLATPAK_ID }} \
/app/share/metainfo/${{ env.FLATPAK_ID }}.metainfo.xml
- name: Create Flatpak bundle
run: |
flatpak build-bundle repo ${{ env.FLATPAK_ID }}.flatpak ${{ env.FLATPAK_ID }}
# Archive everything needed for the Flathub PR
- name: Archive Flatpak files
run: |
mkdir flatpak_output
cp ${{ env.FLATPAK_ID }}.flatpak flatpak_output/
cp com.gooseberrydevelopment.pinepods.yml flatpak_output/
tar -czvf flatpak_files.tar.gz flatpak_output
- name: Upload Flatpak archive
uses: actions/upload-artifact@v4
with:
name: flatpak-files
path: flatpak_files.tar.gz

View File

@@ -1,82 +0,0 @@
name: Build Helm Chart
on:
release:
types: [published]
workflow_dispatch:
inputs:
version:
description: "Manual override version tag (optional)"
required: false
env:
REGISTRY: docker.io
IMAGE_NAME: madeofpendletonwool/pinepods
CHART_NAME: Pinepods
jobs:
build-helm-chart:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
id-token: write
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
token: ${{ secrets.PUSH_PAT }}
persist-credentials: true
- name: Setup Helm
uses: Azure/setup-helm@v4.2.0
- name: Install yq
run: |
sudo wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq &&\
sudo chmod +x /usr/bin/yq
- name: Set Chart Version
run: |
if [ -n "${{ github.event.release.tag_name }}" ]; then
version=${{ github.event.release.tag_name }}
elif [ -n "${{ github.event.inputs.version }}" ]; then
version=${{ github.event.inputs.version }}
else
echo "No version provided. Exiting."
exit 1
fi
echo "Setting chart version to $version"
yq e ".version = \"$version\"" -i deployment/kubernetes/helm/pinepods/Chart.yaml
- name: Package Helm chart
run: |
helm dependency update ./deployment/kubernetes/helm/pinepods
helm package ./deployment/kubernetes/helm/pinepods --destination ./docs
- name: Remove old Helm chart
run: |
ls docs/
find docs/ -type f -name "${CHART_NAME}-*.tgz" ! -name "${CHART_NAME}-${{ github.event.release.tag_name }}.tgz" -exec rm {} +
- name: Update Helm repo index
run: |
helm repo index docs --url https://helm.pinepods.online
- name: Fetch all branches
run: git fetch --all
- name: Fetch tags
run: git fetch --tags
- name: Checkout main branch
run: git checkout main
- uses: EndBug/add-and-commit@v9
with:
github_token: ${{ secrets.PUSH_PAT }}
committer_name: GitHub Actions
committer_email: actions@github.com
message: "Update Helm chart for release ${{ github.event.release.tag_name }}"
add: "docs"

View File

@@ -1,138 +0,0 @@
permissions:
contents: read
name: Build iOS Flutter App
on:
release:
types: [published]
workflow_dispatch:
inputs:
version:
description: "Manual override version tag (optional)"
required: false
jobs:
build:
name: Build iOS Release
runs-on: macOS-latest
steps:
- name: Set Image Tag
run: echo "IMAGE_TAG=${{ github.event.release.tag_name || github.event.inputs.version || 'latest' }}" >> $GITHUB_ENV
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Flutter
uses: subosito/flutter-action@v2
with:
flutter-version: '3.32.0'
channel: 'stable'
- name: Install dependencies
run: |
cd mobile
flutter pub get
cd ios
pod install
- name: Setup iOS signing
env:
IOS_CERTIFICATE_BASE64: ${{ secrets.IOS_CERTIFICATE_BASE64 }}
IOS_CERTIFICATE_PASSWORD: ${{ secrets.IOS_CERTIFICATE_PASSWORD }}
IOS_PROVISIONING_PROFILE_BASE64: ${{ secrets.IOS_PROVISIONING_PROFILE_BASE64 }}
KEYCHAIN_PASSWORD: ${{ secrets.KEYCHAIN_PASSWORD }}
run: |
# Create keychain
security create-keychain -p "$KEYCHAIN_PASSWORD" build.keychain
security default-keychain -s build.keychain
security unlock-keychain -p "$KEYCHAIN_PASSWORD" build.keychain
security set-keychain-settings -t 3600 -l build.keychain
# Import certificate
echo "$IOS_CERTIFICATE_BASE64" | base64 -d > certificate.p12
security import certificate.p12 -P "$IOS_CERTIFICATE_PASSWORD" -A
# Install provisioning profile
mkdir -p ~/Library/MobileDevice/Provisioning\ Profiles
echo "$IOS_PROVISIONING_PROFILE_BASE64" | base64 -d > ~/Library/MobileDevice/Provisioning\ Profiles/build.mobileprovision
- name: Update app version
run: |
cd mobile
# Update pubspec.yaml version
if [[ "$IMAGE_TAG" != "latest" ]]; then
sed -i '' "s/^version: .*/version: ${IMAGE_TAG#v}/" pubspec.yaml
fi
- name: Build iOS app
run: |
cd mobile
flutter build ios --release --no-codesign
- name: Archive and sign iOS app
run: |
cd mobile/ios
xcodebuild -workspace Runner.xcworkspace \
-scheme Runner \
-configuration Release \
-destination generic/platform=iOS \
-archivePath build/Runner.xcarchive \
archive
xcodebuild -exportArchive \
-archivePath build/Runner.xcarchive \
-exportPath build \
-exportOptionsPlist exportOptions.plist
- name: Create export options plist
run: |
cd mobile/ios
cat > exportOptions.plist << EOF
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>method</key>
<string>app-store</string>
<key>teamID</key>
<string>${{ secrets.IOS_TEAM_ID }}</string>
<key>uploadBitcode</key>
<false/>
<key>uploadSymbols</key>
<true/>
<key>compileBitcode</key>
<false/>
</dict>
</plist>
EOF
- name: Upload IPA artifact
uses: actions/upload-artifact@v4
with:
name: ios-ipa-build
path: mobile/ios/build/*.ipa
- name: Upload to App Store Connect
if: github.event_name == 'release'
env:
APP_STORE_CONNECT_API_KEY_ID: ${{ secrets.APP_STORE_CONNECT_API_KEY_ID }}
APP_STORE_CONNECT_ISSUER_ID: ${{ secrets.APP_STORE_CONNECT_ISSUER_ID }}
APP_STORE_CONNECT_API_KEY_BASE64: ${{ secrets.APP_STORE_CONNECT_API_KEY_BASE64 }}
run: |
echo "$APP_STORE_CONNECT_API_KEY_BASE64" | base64 -d > AuthKey.p8
xcrun altool --upload-app \
--type ios \
--file mobile/ios/build/*.ipa \
--apiKey "$APP_STORE_CONNECT_API_KEY_ID" \
--apiIssuer "$APP_STORE_CONNECT_ISSUER_ID"
- name: Cleanup keychain and provisioning profile
if: always()
run: |
if security list-keychains | grep -q "build.keychain"; then
security delete-keychain build.keychain
fi
rm -f ~/Library/MobileDevice/Provisioning\ Profiles/build.mobileprovision
rm -f certificate.p12
rm -f AuthKey.p8

View File

@@ -1,92 +0,0 @@
name: Build Pinepods Snap
on:
# workflow_run:
# workflows: ["Build Tauri Clients"]
# types:
# - completed
workflow_dispatch:
inputs:
version:
description: "Version to build (for testing)"
required: true
default: "test"
jobs:
build-snap:
runs-on: ubuntu-latest
if: ${{ github.event.workflow_run.conclusion == 'success' || github.event_name == 'workflow_dispatch' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Get version
id: get_version
run: |
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
echo "VERSION=${{ github.event.inputs.version }}" >> $GITHUB_ENV
else
LATEST_RELEASE=$(curl -s https://api.github.com/repos/${{ github.repository }}/releases/latest | jq -r .tag_name)
echo "VERSION=$LATEST_RELEASE" >> $GITHUB_ENV
fi
- name: Install Snap
run: |
sudo apt-get update
sudo apt-get install -y snapd
- name: Install Snapcraft
run: |
sudo apt-get install -y snapd
sudo snap install core22
sudo snap install snapcraft --classic
- name: Install Multipass
run: |
sudo snap install multipass --classic
# - name: Setup LXD
# uses: canonical/setup-lxd@main
# with:
# channel: latest/edge
- name: Prepare Snap configuration
run: |
cp clients/snap/snapcraft.yaml ./snapcraft.yaml
sudo chown root:root snapcraft.yaml
sudo chmod 644 snapcraft.yaml
sed -i "s|version: '.*'|version: '$VERSION'|" snapcraft.yaml
sed -i "s|url: .*|url: https://github.com/${{ github.repository }}/releases/download/$VERSION/pinepods_${VERSION}_amd64.deb|" snapcraft.yaml
sed -i "s|Icon=appname|Icon=/usr/share/icons/hicolor/128x128/apps/com.gooseberrydevelopment.pinepods.png|" snapcraft.yaml
- name: Configure snapcraft to use Multipass
run: |
sudo snap set snapcraft provider=multipass
- name: Nuclear permissions reset
run: |
sudo rm -rf /root/project || true
sudo mkdir -p /root/project
sudo cp -r . /root/project/
sudo chown -R root:root /root/project
sudo chmod -R 777 /root/project
sudo chmod -R a+rwx /root/project
sudo ls -la /root/project
- name: Build Snap package
env:
SNAPCRAFT_PROJECT_DIR: ${{ github.workspace }}
run: sudo -E snapcraft --verbose
- name: Archive Snap files
run: |
mkdir snap_output
cp *.snap snap_output/
cp snapcraft.yaml snap_output/
tar -czvf snap_files.tar.gz snap_output
- name: Upload Snap archive
uses: actions/upload-artifact@v4
with:
name: snap-files
path: snap_files.tar.gz

View File

@@ -1,377 +0,0 @@
name: Build Tauri Clients
on:
release:
types: [published]
workflow_dispatch:
inputs:
version:
description: "Manual override version tag (optional)"
required: false
jobs:
compile:
name: Compile
strategy:
matrix:
os:
- ubuntu-latest
- ubuntu-arm64
- macOS-latest
- macOS-13
- windows-latest
include:
- os: ubuntu-arm64
runs-on: ubuntu-24.04-arm
runs-on: ${{ matrix.runs-on || matrix.os }}
env:
DEPENDS_SETUP: ${{ startsWith(matrix.os, 'ubuntu-') && 'true' || 'false' }}
steps:
- name: Set Image Tag (Unix)
if: matrix.os != 'windows-latest'
run: echo "IMAGE_TAG=${{ github.event.release.tag_name || github.event.inputs.version || 'latest' }}" >> $GITHUB_ENV
- name: Set Image Tag (Windows)
if: matrix.os == 'windows-latest'
run: echo "IMAGE_TAG=${{ github.event.release.tag_name || github.event.inputs.version || 'latest' }}" >> $Env:GITHUB_ENV
shell: pwsh
- name: Set environment variables
run: |
if [ "${{ matrix.os }}" = "ubuntu-latest" ]; then
echo "ARTIFACT_NAME1=Pinepods_${{ env.IMAGE_TAG }}_amd64.deb" >> $GITHUB_ENV
echo "ARTIFACT_NAME2=Pinepods_${{ env.IMAGE_TAG }}_amd64.AppImage" >> $GITHUB_ENV
echo "ARTIFACT_NAME3=Pinepods-${{ env.IMAGE_TAG }}-1.x86_64.rpm" >> $GITHUB_ENV
elif [ "${{ matrix.os }}" = "ubuntu-arm64" ]; then
echo "ARTIFACT_NAME1=Pinepods_${{ env.IMAGE_TAG }}_arm64.deb" >> $GITHUB_ENV
echo "ARTIFACT_NAME2=Pinepods_${{ env.IMAGE_TAG }}_aarch64.AppImage" >> $GITHUB_ENV
echo "ARTIFACT_NAME3=Pinepods-${{ env.IMAGE_TAG }}-1.aarch64.rpm" >> $GITHUB_ENV
# ... rest of conditions ...
elif [ "${{ matrix.os }}" = "windows-latest" ]; then
echo "ARTIFACT_NAME1=Pinepods_${{ env.IMAGE_TAG }}_x64-setup.exe" >> $Env:GITHUB_ENV
echo "ARTIFACT_NAME2=Pinepods_${{ env.IMAGE_TAG }}_x64_en-US.msi" >> $Env:GITHUB_ENV
elif [ "${{ matrix.os }}" = "macOS-latest" ]; then
echo "ARTIFACT_NAME1=Pinepods_${{ env.IMAGE_TAG }}_aarch64.dmg" >> $GITHUB_ENV
echo "ARTIFACT_NAME2=Pinepods.app" >> $GITHUB_ENV
elif [ "${{ matrix.os }}" = "macOS-13" ]; then
echo "ARTIFACT_NAME1=Pinepods_${{ env.IMAGE_TAG }}_x64.dmg" >> $GITHUB_ENV
echo "ARTIFACT_NAME2=Pinepods.app" >> $GITHUB_ENV
fi
shell: bash
if: ${{ matrix.os != 'windows-latest' }}
- name: Set environment variables (Windows)
run: |
echo "ARTIFACT_NAME1=Pinepods_${{ env.IMAGE_TAG }}_x64-setup.exe" >> $Env:GITHUB_ENV
echo "ARTIFACT_NAME2=Pinepods_${{ env.IMAGE_TAG }}_x64_en-US.msi" >> $Env:GITHUB_ENV
shell: pwsh
if: ${{ matrix.os == 'windows-latest' }}
- name: Setup | Checkout
uses: actions/checkout@v4
- uses: hecrj/setup-rust-action@v2
with:
rust-version: 1.89
targets: wasm32-unknown-unknown
# Install cargo-binstall for Linux/Windows
- name: Install cargo-binstall
if: matrix.os != 'macos-latest' && matrix.os != 'macOS-13'
uses: cargo-bins/cargo-binstall@main
- name: Depends install
if: ${{ env.DEPENDS_SETUP == 'true' }}
run: |
sudo apt update
sudo apt install -qy libgtk-3-dev
sudo apt-get install -y libwebkit2gtk-4.1-dev libappindicator3-dev librsvg2-dev patchelf
- name: wasm-addition
run: |
rustup target add wasm32-unknown-unknown
- name: Install Trunk (macOS)
if: matrix.os == 'macos-latest' || matrix.os == 'macOS-13'
run: |
brew install trunk
- name: Install Trunk (Linux/Windows)
if: matrix.os != 'macos-latest' && matrix.os != 'macOS-13'
run: |
cargo binstall trunk -y
- name: Install Tauri
run: |
cargo install tauri-cli@2.0.0-rc.15 --locked
- name: Update Tauri version (UNIX)
run: |
cd web/src-tauri
# Use different sed syntax for macOS
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -i '' "s/\"version\": \".*\"/\"version\": \"${IMAGE_TAG}\"/" tauri.conf.json
else
sed -i "s/\"version\": \".*\"/\"version\": \"${IMAGE_TAG}\"/" tauri.conf.json
fi
cat tauri.conf.json
shell: bash
if: ${{ matrix.os != 'windows-latest' }}
- name: Setup Python
if: ${{ matrix.os == 'windows-latest' }}
uses: actions/setup-python@v2
with:
python-version: "3.x"
- name: Verify directory and update Tauri version (Windows)
if: ${{ matrix.os == 'windows-latest' }}
run: |
cd web/src-tauri
dir
python .\change-version.py tauri.conf.json ${{ env.IMAGE_TAG }}
Get-Content tauri.conf.json
shell: pwsh
- name: Build | Compile (UNIX)
run: |
cd web
RUSTFLAGS="--cfg=web_sys_unstable_apis --cfg getrandom_backend=\"wasm_js\"" trunk build --features server_build
cd src-tauri
cat tauri.conf.json
cargo tauri build
pwd
ls
ls -la target/release/bundle
shell: bash
if: ${{ matrix.os != 'windows-latest' }}
- name: Build | Compile (Windows)
run: |
cd web
powershell -ExecutionPolicy Bypass -File .\build.ps1
cd src-tauri
Get-Content tauri.conf.json
cargo tauri build
ls target/release/bundle
shell: pwsh
if: ${{ matrix.os == 'windows-latest' }}
# Ubuntu (x86_64) builds
- name: Archive builds (Ubuntu)
uses: actions/upload-artifact@v4
with:
name: ubuntu-latest-builds
path: |
./web/src-tauri/target/release/bundle/deb/${{ env.ARTIFACT_NAME1 }}
./web/src-tauri/target/release/bundle/appimage/${{ env.ARTIFACT_NAME2 }}
./web/src-tauri/target/release/bundle/rpm/${{ env.ARTIFACT_NAME3 }}
if: ${{ matrix.os == 'ubuntu-latest' }}
# Ubuntu ARM64 builds
- name: Archive builds (Ubuntu ARM)
uses: actions/upload-artifact@v4
with:
name: ubuntu-arm64-builds
path: |
./web/src-tauri/target/release/bundle/deb/${{ env.ARTIFACT_NAME1 }}
./web/src-tauri/target/release/bundle/appimage/${{ env.ARTIFACT_NAME2 }}
./web/src-tauri/target/release/bundle/rpm/${{ env.ARTIFACT_NAME3 }}
if: ${{ matrix.os == 'ubuntu-arm64' }}
# macOS builds - with distinct names
- name: Archive build (macOS ARM)
uses: actions/upload-artifact@v4
with:
name: macos-arm64-builds
path: |
./web/src-tauri/target/release/bundle/dmg/${{ env.ARTIFACT_NAME1 }}
./web/src-tauri/target/release/bundle/macos/${{ env.ARTIFACT_NAME2 }}
if: ${{ matrix.os == 'macOS-latest' }}
- name: Archive build (macOS x64)
uses: actions/upload-artifact@v4
with:
name: macos-x64-builds
path: |
./web/src-tauri/target/release/bundle/dmg/${{ env.ARTIFACT_NAME1 }}
./web/src-tauri/target/release/bundle/macos/${{ env.ARTIFACT_NAME2 }}
if: ${{ matrix.os == 'macOS-13' }}
- name: Archive build (Windows)
uses: actions/upload-artifact@v4
with:
name: ${{ matrix.os }}-build
path: |
./web/src-tauri/target/release/bundle/nsis/${{ env.ARTIFACT_NAME1 }}
./web/src-tauri/target/release/bundle/msi/${{ env.ARTIFACT_NAME2 }}
if: ${{ matrix.os == 'windows-latest' }}
- name: Upload release asset (Ubuntu - DEB)
if: github.event_name == 'release' && matrix.os == 'ubuntu-latest'
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ github.event.release.upload_url }}
asset_path: ./web/src-tauri/target/release/bundle/deb/${{ env.ARTIFACT_NAME1 }}
asset_name: ${{ env.ARTIFACT_NAME1 }}
asset_content_type: application/vnd.debian.binary-package
- name: Upload release asset (Ubuntu ARM - DEB)
if: github.event_name == 'release' && matrix.os == 'ubuntu-arm64'
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ github.event.release.upload_url }}
asset_path: ./web/src-tauri/target/release/bundle/deb/${{ env.ARTIFACT_NAME1 }}
asset_name: ${{ env.ARTIFACT_NAME1 }}
asset_content_type: application/vnd.debian.binary-package
- name: Upload release asset (Ubuntu - AppImage)
if: github.event_name == 'release' && matrix.os == 'ubuntu-latest'
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ github.event.release.upload_url }}
asset_path: ./web/src-tauri/target/release/bundle/appimage/${{ env.ARTIFACT_NAME2 }}
asset_name: ${{ env.ARTIFACT_NAME2 }}
asset_content_type: application/x-executable
- name: Upload release asset (Ubuntu ARM - AppImage)
if: github.event_name == 'release' && matrix.os == 'ubuntu-arm64'
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ github.event.release.upload_url }}
asset_path: ./web/src-tauri/target/release/bundle/appimage/${{ env.ARTIFACT_NAME2 }}
asset_name: ${{ env.ARTIFACT_NAME2 }}
asset_content_type: application/x-executable
- name: Upload release asset (Ubuntu - RPM)
if: github.event_name == 'release' && matrix.os == 'ubuntu-latest'
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ github.event.release.upload_url }}
asset_path: ./web/src-tauri/target/release/bundle/rpm/${{ env.ARTIFACT_NAME3 }}
asset_name: ${{ env.ARTIFACT_NAME3 }}
asset_content_type: application/x-rpm
- name: Upload release asset (Ubuntu ARM - RPM)
if: github.event_name == 'release' && matrix.os == 'ubuntu-arm64'
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ github.event.release.upload_url }}
asset_path: ./web/src-tauri/target/release/bundle/rpm/${{ env.ARTIFACT_NAME3 }}
asset_name: ${{ env.ARTIFACT_NAME3 }}
asset_content_type: application/x-rpm
- name: Upload release asset (macOS - DMG)
if: github.event_name == 'release' && (matrix.os == 'macOS-latest' || matrix.os == 'macOS-13')
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ github.event.release.upload_url }}
asset_path: ./web/src-tauri/target/release/bundle/dmg/${{ env.ARTIFACT_NAME1 }}
asset_name: ${{ env.ARTIFACT_NAME1 }}
asset_content_type: application/x-apple-diskimage
- name: Upload release asset (Windows - EXE)
if: github.event_name == 'release' && matrix.os == 'windows-latest'
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ github.event.release.upload_url }}
asset_path: ./web/src-tauri/target/release/bundle/nsis/${{ env.ARTIFACT_NAME1 }}
asset_name: ${{ env.ARTIFACT_NAME1 }}
asset_content_type: application/vnd.microsoft.portable-executable
- name: Upload release asset (Windows - MSI)
if: github.event_name == 'release' && matrix.os == 'windows-latest'
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ github.event.release.upload_url }}
asset_path: ./web/src-tauri/target/release/bundle/msi/${{ env.ARTIFACT_NAME2 }}
asset_name: ${{ env.ARTIFACT_NAME2 }}
asset_content_type: application/x-msi
# release:
# needs: compile
# runs-on: ubuntu-latest
# steps:
# - name: Checkout code
# uses: actions/checkout@v2
# - name: Download artifacts
# uses: actions/download-artifact@v2
# with:
# name: ubuntu-latest-build
# path: artifacts/ubuntu-latest
# - name: Download artifacts
# uses: actions/download-artifact@v2
# with:
# name: macOS-latest-build
# path: artifacts/macOS-latest
# - name: Download artifacts
# uses: actions/download-artifact@v2
# with:
# name: windows-latest-build
# path: artifacts/windows-latest
# - name: Create Release
# id: create_release
# uses: actions/create-release@v1
# env:
# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# with:
# tag_name: release-${{ github.run_id }}-beta
# release_name: Release-${{ github.run_id }}-beta
# draft: false
# prerelease: true
# - name: Upload Release Asset
# id: upload-release-asset-ubuntu
# uses: actions/upload-release-asset@v1
# env:
# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# with:
# upload_url: ${{ steps.create_release.outputs.upload_url }}
# asset_path: ./artifacts/ubuntu-latest/PinePods
# asset_name: PinePods-ubuntu-latest
# asset_content_type: application/octet-stream
# - name: Upload Release Asset
# id: upload-release-asset-macos
# uses: actions/upload-release-asset@v1
# env:
# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# with:
# upload_url: ${{ steps.create_release.outputs.upload_url }}
# asset_path: ./artifacts/macOS-latest/PinePods
# asset_name: PinePods-macOS-latest
# asset_content_type: application/octet-stream
# - name: Upload Release Asset
# id: upload-release-asset-windows
# uses: actions/upload-release-asset@v1
# env:
# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# with:
# upload_url: ${{ steps.create_release.outputs.upload_url }}
# asset_path: ./artifacts/windows-latest/PinePods.exe
# asset_name: PinePods-windows-latest.exe
# asset_content_type: application/octet-stream

View File

@@ -1,103 +0,0 @@
name: Pinepods CI
on:
pull_request:
branches: [main]
push:
branches: [main]
workflow_dispatch:
jobs:
backend-tests:
runs-on: ubuntu-latest
services:
postgres:
image: postgres:latest
env:
POSTGRES_USER: test_user
POSTGRES_PASSWORD: test_password
POSTGRES_DB: test_db
ports:
- 5432:5432
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.x"
- name: Setup test environment
run: |
chmod +x ./setup-tests.sh
./setup-tests.sh
- name: Run backend tests
env:
TEST_MODE: true
DB_HOST: localhost
DB_PORT: 5432
DB_USER: test_user
DB_PASSWORD: test_password
DB_NAME: test_db
DB_TYPE: postgresql
TEST_DB_TYPE: postgresql
PYTHONPATH: ${{ github.workspace }}
run: |
chmod +x ./run-tests.sh
./run-tests.sh postgresql
frontend-tests:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: hecrj/setup-rust-action@v2
with:
rust-version: 1.89
targets: wasm32-unknown-unknown
# Install cargo-binstall for other OSes using the standard method
- name: Install cargo-binstall
if: matrix.os != 'macos-latest'
uses: cargo-bins/cargo-binstall@main
- name: Depends install
if: ${{ env.DEPENDS_SETUP == 'true' }}
run: |
sudo apt update
sudo apt install -qy libgtk-3-dev
sudo apt-get install -y libwebkit2gtk-4.0-dev libwebkit2gtk-4.1-dev libappindicator3-dev librsvg2-dev patchelf
- name: wasm-addition
run: |
rustup target add wasm32-unknown-unknown
- name: Install Trunk
run: |
cargo binstall trunk -y
- name: Run frontend tests
working-directory: ./web
run: |
RUSTFLAGS="--cfg=web_sys_unstable_apis" cargo test --features server_build -- --nocapture
# docker-build:
# runs-on: ubuntu-latest
# needs: [backend-tests, frontend-tests]
# steps:
# - uses: actions/checkout@v3
# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v2
# - name: Build and test Docker image
# run: |
# docker build -t pinepods:test .
# docker run --rm pinepods:test /bin/sh -c "python3 -m pytest /pinepods/tests/"

View File

@@ -1,120 +0,0 @@
name: Publish Pinepods Multi-Architecture Image to DockerHub
on:
release:
types: [released]
workflow_dispatch:
inputs:
version:
description: "Manual override version tag (optional)"
required: false
env:
REGISTRY: docker.io
IMAGE_NAME: madeofpendletonwool/pinepods
jobs:
set-env:
runs-on: ubuntu-latest
outputs:
IMAGE_TAG: ${{ steps.set_tags.outputs.IMAGE_TAG }}
CREATE_LATEST: ${{ steps.set_tags.outputs.CREATE_LATEST }}
steps:
- name: Set Image Tag and Latest Tag
id: set_tags
run: |
echo "IMAGE_TAG=${{ github.event.release.tag_name || github.event.inputs.version || 'latest' }}" >> $GITHUB_OUTPUT
if [ "${{ github.event_name }}" == "release" ]; then
echo "CREATE_LATEST=true" >> $GITHUB_OUTPUT
else
echo "CREATE_LATEST=false" >> $GITHUB_OUTPUT
fi
build-and-push-x86:
needs: set-env
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
id-token: write
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Log in to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_KEY }}
- name: Build and push x86 image
run: |
docker build --platform linux/amd64 --build-arg PINEPODS_VERSION=${{ needs.set-env.outputs.IMAGE_TAG }} -t ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ needs.set-env.outputs.IMAGE_TAG }}-amd64 -f dockerfile .
docker push ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ needs.set-env.outputs.IMAGE_TAG }}-amd64
if [ "${{ needs.set-env.outputs.CREATE_LATEST }}" == "true" ]; then
docker tag ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ needs.set-env.outputs.IMAGE_TAG }}-amd64 ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest-amd64
docker push ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest-amd64
fi
build-and-push-arm64:
needs: set-env
runs-on: ubuntu-24.04-arm
permissions:
contents: read
packages: write
id-token: write
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Log in to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_KEY }}
- name: Build and push ARM64 image
run: |
docker build --platform linux/arm64 --build-arg PINEPODS_VERSION=${{ needs.set-env.outputs.IMAGE_TAG }} -t ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ needs.set-env.outputs.IMAGE_TAG }}-arm64 -f dockerfile-arm .
docker push ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ needs.set-env.outputs.IMAGE_TAG }}-arm64
if [ "${{ needs.set-env.outputs.CREATE_LATEST }}" == "true" ]; then
docker tag ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ needs.set-env.outputs.IMAGE_TAG }}-arm64 ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest-arm64
docker push ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest-arm64
fi
create-manifests:
needs: [set-env, build-and-push-x86, build-and-push-arm64]
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
id-token: write
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Log in to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_KEY }}
- name: Create and push Docker manifest for the version tag
run: |
sleep 10
# Pull the images first to ensure they're available
docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ needs.set-env.outputs.IMAGE_TAG }}-amd64
docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ needs.set-env.outputs.IMAGE_TAG }}-arm64
# Create and push manifest
docker manifest create ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ needs.set-env.outputs.IMAGE_TAG }} \
--amend ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ needs.set-env.outputs.IMAGE_TAG }}-amd64 \
--amend ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ needs.set-env.outputs.IMAGE_TAG }}-arm64
docker manifest push ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ needs.set-env.outputs.IMAGE_TAG }}
- name: Create and push Docker manifest for the latest tag
if: needs.set-env.outputs.CREATE_LATEST == 'true'
run: |
docker manifest create ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest \
--amend ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest-amd64 \
--amend ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest-arm64
docker manifest push ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest

View File

@@ -1,93 +0,0 @@
name: Publish Pinepods Nightly Multi-Architecture Image to DockerHub
on:
schedule:
- cron: "23 1 * * *"
workflow_dispatch:
env:
REGISTRY: docker.io
IMAGE_NAME: madeofpendletonwool/pinepods
NIGHTLY_TAG: nightly
jobs:
build-and-push-nightly-x86:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
id-token: write
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Log in to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_KEY }}
- name: Build and push x86 image
run: |
docker build --platform linux/amd64 -t ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.NIGHTLY_TAG }}-amd64 -f dockerfile .
docker push ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.NIGHTLY_TAG }}-amd64
- name: Image digest
run: echo ${{ steps.docker_build.outputs.digest }}
build-and-push-nightly-arm64:
runs-on: ubuntu-24.04-arm
permissions:
contents: read
packages: write
id-token: write
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Log in to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_KEY }}
- name: Build and push ARMv8 image
run: |
docker build --platform linux/arm64 -t ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.NIGHTLY_TAG }}-arm64 -f dockerfile-arm .
docker push ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.NIGHTLY_TAG }}-arm64
- name: Image digest
run: echo ${{ steps.docker_build.outputs.digest }}
manifest-nightly:
needs: [build-and-push-nightly-x86, build-and-push-nightly-arm64]
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
id-token: write
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Log in to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_KEY }}
- name: Create and push Docker manifest for the nightly tag
run: |
docker manifest create ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.NIGHTLY_TAG }} \
--amend ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.NIGHTLY_TAG }}-amd64 \
--amend ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.NIGHTLY_TAG }}-arm64
docker manifest push ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.NIGHTLY_TAG }}

View File

@@ -1,67 +0,0 @@
name: Notifications on release
on:
workflow_run:
workflows: ["Publish Pinepods Multi-Architecture Image to DockerHub"]
types:
- completed
workflow_dispatch:
inputs:
message_text:
description: "Manual override text (optional)"
required: false
jobs:
discord_announcement:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
id-token: write
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Fetch the latest release
id: fetch_release
run: |
latest_release=$(curl -s https://api.github.com/repos/${{ github.repository }}/releases/latest | jq -r '.tag_name')
release_url=$(curl -s https://api.github.com/repos/${{ github.repository }}/releases/latest | jq -r '.html_url')
echo "Latest release version: $latest_release"
echo "Release URL: $release_url"
echo "::set-output name=version::$latest_release"
echo "::set-output name=release_url::$release_url"
# Check if this is an RC release
if [[ "$latest_release" == *"-rc"* ]]; then
echo "RC release detected, skipping Discord notification"
echo "::set-output name=is_rc::true"
else
echo "::set-output name=is_rc::false"
fi
- name: Set release message
id: set_message
run: |
if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
echo "::set-output name=message::${{ github.event.inputs.message_text }}"
else
version="${{ steps.fetch_release.outputs.version }}"
release_url="${{ steps.fetch_release.outputs.release_url }}"
message="Pinepods Version $version Released! Check out the release [here]($release_url)"
echo "::set-output name=message::$message"
fi
- name: Skip Discord notification for RC release
if: steps.fetch_release.outputs.is_rc == 'true'
run: |
echo "Skipping Discord notification for RC release: ${{ steps.fetch_release.outputs.version }}"
- name: Discord notification to announce deployment
if: steps.fetch_release.outputs.is_rc == 'false'
env:
DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
uses: Ilshidur/action-discord@master
with:
args: ${{ steps.set_message.outputs.message }}

View File

@@ -1,53 +0,0 @@
name: Pre-Release Version Update
on:
workflow_dispatch:
inputs:
version:
description: "Version to set (e.g., 0.8.0)"
required: true
type: string
jobs:
update-version:
name: Update Version Files
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Update app version
run: |
cd mobile
VERSION_NAME=${{ github.event.inputs.version }}
# Calculate what the git count WILL BE after we commit (current + 1)
BUILD_NUMBER=$(($(git rev-list --count HEAD) + 1 + 20250000))
# Update pubspec.yaml version
sed -i "s/^version: .*/version: ${VERSION_NAME}+${BUILD_NUMBER}/" pubspec.yaml
# Update environment.dart constants
sed -i "s/static const _projectVersion = '[^']*';/static const _projectVersion = '${VERSION_NAME}';/" lib/core/environment.dart
sed -i "s/static const _build = '[^']*';/static const _build = '${BUILD_NUMBER}';/" lib/core/environment.dart
echo "Updated version to ${VERSION_NAME}+${BUILD_NUMBER}"
- name: Commit and push version update
run: |
git config --local user.email "action@github.com"
git config --local user.name "GitHub Action"
git add mobile/pubspec.yaml mobile/lib/core/environment.dart
git commit -m "chore: update version to ${{ github.event.inputs.version }} [skip ci]"
git push
- name: Summary
run: |
echo "✅ Version updated to ${{ github.event.inputs.version }}"
echo "📋 Next steps:"
echo "1. Create a GitHub release pointing to the latest commit"
echo "2. The release workflow will build from that exact commit"
echo "3. Version files will match the commit for reproducible builds"

View File

@@ -1,43 +0,0 @@
# Simple workflow for deploying static content to GitHub Pages
name: Deploy static content to Pages
on:
# Runs on pushes targeting the default branch
push:
branches: ["main"]
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
contents: read
pages: write
id-token: write
# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
concurrency:
group: "pages"
cancel-in-progress: false
jobs:
# Single deploy job since we're just deploying
deploy:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Pages
uses: actions/configure-pages@v5
- name: Upload artifact
uses: actions/upload-pages-artifact@v3
with:
# Upload entire repository
path: './docs'
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v4

View File

@@ -1,41 +0,0 @@
name: Test Pinepods
on:
# pull_request:
# types:
# - opened
# - synchronize
# branches: [ master ]
workflow_dispatch:
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Build the Docker test container
run: docker build -t madeofpendletonwool/pinepods-test . -f dockerfile-test
- uses: rustsec/audit-check@v1.4.1
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Run tests in the Docker container
run: docker run madeofpendletonwool/pinepods-test
cache-checkmate:
runs-on: ubuntu-latest
steps:
- uses: taiki-e/cache-cargo-install-action@v1
with:
tool: cargo-checkmate
run-phase:
strategy:
matrix:
phase: [audit, build, check, clippy, doc, test]
needs: cache-checkmate
runs-on: ubuntu-latest
steps:
- uses: taiki-e/cache-cargo-install-action@v1
with:
tool: cargo-checkmate
- uses: actions/checkout@v4
- run: cargo-checkmate run ${{ matrix.phase }}

View File

@@ -1,98 +0,0 @@
name: Update AUR Package
on:
workflow_run:
workflows: ["Build Tauri Clients"]
types:
- completed
workflow_dispatch:
inputs:
version:
description: "Version tag (e.g. 0.6.6)"
required: true
jobs:
update-aur-package:
if: ${{ github.event.workflow_run.conclusion == 'success' || github.event_name == 'workflow_dispatch' }}
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set version
run: |
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
echo "VERSION=${{ github.event.inputs.version }}" >> $GITHUB_ENV
else
# Extract version from the triggering release
RELEASE_TAG=$(curl -s "https://api.github.com/repos/${{ github.repository }}/releases/latest" | jq -r .tag_name)
echo "VERSION=$RELEASE_TAG" >> $GITHUB_ENV
fi
- name: Generate PKGBUILD
run: |
# Calculate checksums for both architectures
x86_64_url="https://github.com/madeofpendletonwool/PinePods/releases/download/$VERSION/Pinepods_${VERSION}_amd64.deb"
aarch64_url="https://github.com/madeofpendletonwool/PinePods/releases/download/$VERSION/Pinepods_${VERSION}_arm64.deb"
echo "Downloading and calculating checksums..."
curl -L "$x86_64_url" -o x86_64.deb
curl -L "$aarch64_url" -o aarch64.deb
x86_64_sum=$(sha256sum x86_64.deb | cut -d' ' -f1)
aarch64_sum=$(sha256sum aarch64.deb | cut -d' ' -f1)
cat > PKGBUILD << EOF
pkgname=pinepods
pkgver=$VERSION
pkgrel=1
pkgdesc="Pinepods is a complete podcast management system and allows you to play, download, and keep track of podcasts you enjoy. All self hosted and enjoyed on your own server!"
arch=('x86_64' 'aarch64')
url="https://github.com/madeofpendletonwool/PinePods"
license=('gpl3')
depends=('cairo' 'desktop-file-utils' 'gdk-pixbuf2' 'glib2' 'gtk3' 'hicolor-icon-theme' 'libsoup' 'pango' 'webkit2gtk')
options=('!strip' '!emptydirs')
source_x86_64=("https://github.com/madeofpendletonwool/PinePods/releases/download/\${pkgver}/Pinepods_\${pkgver}_amd64.deb")
source_aarch64=("https://github.com/madeofpendletonwool/PinePods/releases/download/\${pkgver}/Pinepods_\${pkgver}_arm64.deb")
sha256sums_x86_64=('$x86_64_sum')
sha256sums_aarch64=('$aarch64_sum')
package() {
# Extract the .deb package
cd "\$srcdir"
tar xf data.tar.gz -C "\$pkgdir/"
# Create symlink from /usr/bin/app to /usr/bin/pinepods
ln -s /usr/bin/app "\$pkgdir/usr/bin/pinepods"
# Ensure correct permissions
chmod 755 "\$pkgdir/usr/bin/app"
chmod 644 "\$pkgdir/usr/share/applications/Pinepods.desktop"
find "\$pkgdir/usr/share/icons" -type f -exec chmod 644 {} +
find "\$pkgdir" -type d -exec chmod 755 {} +
}
EOF
- name: Test PKGBUILD
uses: KSXGitHub/github-actions-deploy-aur@v3.0.1
with:
pkgname: pinepods
pkgbuild: ./PKGBUILD
test: true
commit_username: ${{ secrets.GIT_USER }}
commit_email: ${{ secrets.GIT_EMAIL }}
ssh_private_key: ${{ secrets.AUR_SSH_PRIVATE_KEY }}
commit_message: "Update to version ${{ env.VERSION }}"
ssh_keyscan_types: rsa,ecdsa,ed25519
- name: Publish AUR package
if: success()
uses: KSXGitHub/github-actions-deploy-aur@v3.0.1
with:
pkgname: pinepods
pkgbuild: ./PKGBUILD
commit_username: ${{ secrets.GIT_USER }}
commit_email: ${{ secrets.GIT_EMAIL }}
ssh_private_key: ${{ secrets.AUR_SSH_PRIVATE_KEY }}
commit_message: "Update to version ${{ env.VERSION }}"
ssh_keyscan_types: rsa,ecdsa,ed25519

View File

@@ -1,206 +0,0 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
# C extensions
*.so
# Distribution / packaging
bin/
build/
develop-eggs/
dist/
eggs/
/lib/
/lib64/
# Exception for Flutter lib directory
!/mobile/lib/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
.tox/
.coverage
.cache
nosetests.xml
coverage.xml
# Translations
*.mo
# Mr Developer
.mr.developer.cfg
.project
.pydevproject
# Rope
.ropeproject
# Django stuff:
*.log
*.pot
# Sphinx documentation
docs/_build/
# InstallerFiles
clients/windows-app/pinepods.exe
clients/linux-app/pinepods
clients/windows-app/dist
clients/linux-app/dist
clients/mac-app/dist
clients/mac-app/pinepods
clients/windows-app/generated*
clients/windows-app/pinepods.spec
clients/linux-app/pinepods.spec
# Mac Files
.DS_Store
clients/.DS_Store
*/.DS_Store
*/pinepods.spec
*/generated
clients/mac-app/pinepods.spec
# env files
*/env_file
*/.env
# pycharm
.idea/*
.idea/misc.xml
.idea/misc.xml
.idea/PyPods.iml
.idea/misc.xml
.idea/misc.xml
.idea/PyPods.iml
# Web Removals
web/target/*
web/.idea/*
keystore.properties
key.properties
**/key.properties
# Virtual Environment
venv/
.venv/
ENV/
# Python cache files
__pycache__/
*.py[cod]
*$py.class
.pytest_cache/
.coverage
coverage.xml
.hypothesis/
# Environment variables
.env
.env.test
# IDE specific files
.vscode/
.idea/
*.swp
*.swo
# Test database
*.sqlite3
*.db
# Log files
*.log
# Local test directory
tests_local/
# Miscellaneous
*.class
*.log
*.pyc
*.swp
.DS_Store
.atom/
.buildlog/
.history
.svn/
migrate_working_dir/
# IntelliJ related
*.iml
*.ipr
*.iws
.idea/
# VS Code related
.vscode/
# Flutter/Dart/Pub related
**/doc/api/
**/ios/Flutter/.last_build_id
.dart_tool/
.flutter-plugins
.flutter-plugins-dependencies
.packages
.pub-cache/
.pub/
/build/
pubspec.lock
# Android related
**/android/**/gradle-wrapper.jar
**/android/.gradle
**/android/captures/
**/android/gradlew
**/android/gradlew.bat
**/android/local.properties
**/android/**/GeneratedPluginRegistrant.*
# iOS/XCode related
**/ios/**/*.mode1v3
**/ios/**/*.mode2v3
**/ios/**/*.moved-aside
**/ios/**/*.pbxuser
**/ios/**/*.perspectivev3
**/ios/**/*sync/
**/ios/**/.sconsign.dblite
**/ios/**/.tags*
**/ios/**/.vagrant/
**/ios/**/DerivedData/
**/ios/**/Icon?
**/ios/**/Pods/
**/ios/**/.symlinks/
**/ios/**/profile
**/ios/**/xcuserdata
**/ios/.generated/
**/ios/Flutter/App.framework
**/ios/Flutter/Flutter.framework
**/ios/Flutter/Generated.xcconfig
**/ios/Flutter/app.flx
**/ios/Flutter/app.zip
**/ios/Flutter/flutter_assets/
**/ios/ServiceDefinitions.json
**/ios/Runner/GeneratedPluginRegistrant.*
# Exceptions to above rules.
!**/ios/**/default.mode1v3
!**/ios/**/default.mode2v3
!**/ios/**/default.pbxuser
!**/ios/**/default.perspectivev3
# C/C++ build files
**/android/app/.cxx/
**/android/**/.cxx/

View File

@@ -1,12 +0,0 @@
version: '3'
services:
  pinepods-backend:
    image: madeofpendletonwool/pinepods_backend:latest
    container_name: pinepods-backend
    env_file: env_file
    environment:
      # Add your YouTube Data API v3 key here for YouTube channel search
      - YOUTUBE_API_KEY=your_youtube_api_key_here
    ports:
      # Quoted: unquoted colon-separated digits can hit YAML 1.1
      # sexagesimal parsing; Compose docs recommend quoting port mappings.
      - "5000:5000"
    restart: unless-stopped

View File

@@ -1,46 +0,0 @@
# Builder stage for compiling the Actix web application
FROM rust:bookworm AS builder

# Install build dependencies (apt lists removed to keep the layer small)
RUN apt-get update && apt-get upgrade -y && \
    apt-get install -y --no-install-recommends \
    libssl-dev pkg-config build-essential && \
    rm -rf /var/lib/apt/lists/*

# Set the working directory
WORKDIR /app

# Copy your application files to the builder stage
COPY ./pinepods_backend/Cargo.toml ./Cargo.toml
COPY ./pinepods_backend/src ./src

# Build the Actix web application in release mode
RUN cargo build --release

# Final stage for setting up the runtime environment
FROM debian:bookworm-slim

# Metadata
LABEL maintainer="Collin Pendleton <collinp@collinpendleton.com>"

# Install runtime dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    bash curl openssl ca-certificates && \
    rm -rf /var/lib/apt/lists/*

# Copy the compiled binary from the builder stage
COPY --from=builder /app/target/release/pinepods_backend /usr/local/bin/pinepods_backend
COPY ./startup.sh /startup.sh
RUN chmod +x /startup.sh

# Set the working directory
WORKDIR /

# Set environment variables if needed
ENV RUST_LOG=info

# Expose the port the server actually binds: main.rs listens on
# 0.0.0.0:5000 and docker-compose maps 5000:5000 (was incorrectly 8080).
EXPOSE 5000

# Start the Actix web server
CMD ["/startup.sh"]

File diff suppressed because it is too large Load Diff

View File

@@ -1,19 +0,0 @@
[package]
name = "pinepods_backend"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
actix-web = "4.11.0"  # HTTP server / routing
serde = { version = "1.0.225", features = ["derive"] }  # (de)serialization derives
serde_json = "1.0.145"  # JSON payloads for the API responses
reqwest = { version = "0.12.23", features = ["json", "rustls-tls"] }  # upstream HTTP client
env_logger = "0.11.8"  # logging backend for the `log` macros
log = "0.4.28"
dotenvy = "0.15.7"  # loads .env at startup
sha1 = "0.10.6"  # Podcast Index auth hash
urlencoding = "2.1.3"  # query-string encoding of search terms
actix-cors = "0.7.1"  # permissive CORS middleware
chrono = { version = "0.4.42", features = ["serde"] }  # timestamp in /api/stats

View File

@@ -1,642 +0,0 @@
use actix_web::{web, App, HttpResponse, HttpServer, Responder};
use reqwest::header::{HeaderMap, HeaderValue, USER_AGENT};
use serde::{Deserialize, Serialize};
use std::env;
use dotenvy::dotenv;
use std::time::{SystemTime, UNIX_EPOCH};
use sha1::{Digest, Sha1};
use log::{info, error};
use actix_cors::Cors;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use chrono;
/// Query parameters for `/api/search`. All fields are optional so that a
/// bare request (no query, no index) can act as a connectivity test.
#[derive(Deserialize)]
struct SearchQuery {
    query: Option<String>,        // search term
    index: Option<String>,        // "itunes", "youtube", or Podcast Index (default)
    search_type: Option<String>,  // "person" or "term" (default)
}

/// Query parameters for `/api/podcast` (Podcast Index lookup by feed id).
#[derive(Deserialize)]
struct PodcastQuery {
    id: String,  // Podcast Index feed id
}

/// Query parameters for `/api/youtube/channel`.
#[derive(Deserialize)]
struct YouTubeChannelQuery {
    id: String,  // YouTube channel id
}
/// Per-backend request counters for API usage tracking. Cloning is cheap
/// and clones share the same underlying counters via `Arc`.
#[derive(Clone)]
struct HitCounters {
    itunes_hits: Arc<AtomicU64>,
    podcast_index_hits: Arc<AtomicU64>,
    youtube_hits: Arc<AtomicU64>,
}

impl HitCounters {
    /// Builds a counter set with every count starting at zero.
    fn new() -> Self {
        let fresh = || Arc::new(AtomicU64::new(0));
        Self {
            itunes_hits: fresh(),
            podcast_index_hits: fresh(),
            youtube_hits: fresh(),
        }
    }

    /// Records one iTunes search hit.
    fn increment_itunes(&self) {
        self.itunes_hits.fetch_add(1, Ordering::Relaxed);
    }

    /// Records one Podcast Index hit.
    fn increment_podcast_index(&self) {
        self.podcast_index_hits.fetch_add(1, Ordering::Relaxed);
    }

    /// Records one YouTube Data API hit.
    fn increment_youtube(&self) {
        self.youtube_hits.fetch_add(1, Ordering::Relaxed);
    }

    /// Snapshot of `(itunes, podcast_index, youtube)` hit counts.
    fn get_stats(&self) -> (u64, u64, u64) {
        let read = |counter: &Arc<AtomicU64>| counter.load(Ordering::Relaxed);
        (
            read(&self.itunes_hits),
            read(&self.podcast_index_hits),
            read(&self.youtube_hits),
        )
    }
}
// YouTube API response structures for search
// (mirrors the youtube/v3/search?type=channel response shape)
#[derive(Deserialize, Serialize)]
struct YouTubeSearchResponse {
    items: Vec<YouTubeChannelResult>,
}

#[derive(Deserialize, Serialize)]
struct YouTubeChannelResult {
    id: YouTubeChannelId,
    snippet: YouTubeChannelSnippet,
}

#[derive(Deserialize, Serialize)]
struct YouTubeChannelId {
    #[serde(rename = "channelId")]
    channel_id: String,
}

#[derive(Deserialize, Serialize)]
struct YouTubeChannelSnippet {
    title: String,
    description: String,
    thumbnails: YouTubeThumbnails,
    #[serde(rename = "channelTitle")]
    channel_title: Option<String>,
}

// Thumbnail variants; any size may be absent in the API response.
#[derive(Deserialize, Serialize)]
struct YouTubeThumbnails {
    default: Option<YouTubeThumbnail>,
    medium: Option<YouTubeThumbnail>,
    high: Option<YouTubeThumbnail>,
}

#[derive(Deserialize, Serialize)]
struct YouTubeThumbnail {
    url: String,
}

// YouTube API response structures for channel details
// (youtube/v3/channels?part=snippet,statistics)
#[derive(Deserialize)]
struct YouTubeChannelDetailsResponse {
    items: Vec<YouTubeChannelDetailsItem>,
}

#[derive(Deserialize)]
struct YouTubeChannelDetailsItem {
    snippet: YouTubeChannelDetailsSnippet,
    statistics: Option<YouTubeChannelStatistics>,
}

#[derive(Deserialize)]
struct YouTubeChannelDetailsSnippet {
    title: String,
    description: String,
    thumbnails: YouTubeThumbnails,
}

// Counts arrive as strings from the YouTube API; parsed to i64 later.
#[derive(Deserialize)]
struct YouTubeChannelStatistics {
    #[serde(rename = "subscriberCount")]
    subscriber_count: Option<String>,
    #[serde(rename = "videoCount")]
    video_count: Option<String>,
}

// YouTube API response structures for channel videos
// (youtube/v3/search?type=video&order=date)
#[derive(Deserialize)]
struct YouTubeVideosResponse {
    items: Vec<YouTubeVideoItem>,
}

#[derive(Deserialize)]
struct YouTubeVideoItem {
    id: YouTubeVideoId,
    snippet: YouTubeVideoSnippet,
    #[serde(rename = "contentDetails")]
    content_details: Option<YouTubeVideoContentDetails>,
}

#[derive(Deserialize)]
struct YouTubeVideoId {
    #[serde(rename = "videoId")]
    video_id: String,
}

#[derive(Deserialize)]
struct YouTubeVideoSnippet {
    title: String,
    description: String,
    thumbnails: YouTubeThumbnails,
    #[serde(rename = "publishedAt")]
    published_at: String,
}

#[derive(Deserialize)]
struct YouTubeVideoContentDetails {
    duration: Option<String>,
}

// Simplified response format to match other APIs
// (what this service itself returns to clients)
#[derive(Serialize)]
struct YouTubeSearchResult {
    results: Vec<YouTubeChannel>,
}

#[derive(Serialize)]
struct YouTubeChannel {
    #[serde(rename = "channelId")]
    channel_id: String,
    name: String,
    description: String,
    #[serde(rename = "thumbnailUrl")]
    thumbnail_url: String,
    url: String,
}

// YouTube channel details response (when user clicks a channel)
#[derive(Serialize)]
struct YouTubeChannelDetails {
    #[serde(rename = "channelId")]
    channel_id: String,
    name: String,
    description: String,
    #[serde(rename = "thumbnailUrl")]
    thumbnail_url: String,
    url: String,
    #[serde(rename = "subscriberCount")]
    subscriber_count: Option<i64>,
    #[serde(rename = "videoCount")]
    video_count: Option<i64>,
    #[serde(rename = "recentVideos")]
    recent_videos: Vec<YouTubeVideo>,
}

#[derive(Serialize)]
struct YouTubeVideo {
    id: String,
    title: String,
    description: String,
    url: String,
    thumbnail: String,
    #[serde(rename = "publishedAt")]
    published_at: String,
    duration: Option<String>,
}
/// GET /api/search — dispatches a search to iTunes, YouTube, or the
/// Podcast Index depending on the `index` query parameter, incrementing
/// the matching hit counter before each upstream call.
///
/// A request with neither `query` nor `index` replies 200 as a plain
/// connectivity test.
async fn search_handler(
    query: web::Query<SearchQuery>,
    hit_counters: web::Data<HitCounters>,
) -> impl Responder {
    println!("search_handler called");
    // Bare request (no query and no index) acts as a health check.
    if query.query.is_none() && query.index.is_none() {
        println!("Empty query and index - returning 200 OK");
        return HttpResponse::Ok().body("Test connection successful");
    }
    let search_term = query.query.clone().unwrap_or_default();
    let index = query.index.clone().unwrap_or_default().to_lowercase();
    let search_type = query.search_type.clone().unwrap_or_else(|| "term".to_string());
    println!("Received search request - Query: {}, Index: {}, Type: {}", search_term, index, search_type);
    println!("Searching for: {}", search_term);
    let client = reqwest::Client::new();
    println!("Client created");
    let response = if index == "itunes" {
        // iTunes Search
        hit_counters.increment_itunes();
        // NOTE(review): search_term is interpolated un-encoded here, unlike
        // the Podcast Index branch below — confirm whether that is intended.
        let itunes_search_url = format!("https://itunes.apple.com/search?term={}&media=podcast", search_term);
        println!("Using iTunes search URL: {}", itunes_search_url);
        client.get(&itunes_search_url).send().await
    } else if index == "youtube" {
        // YouTube Data API v3 Search (returns directly; not routed through
        // handle_response like the other two backends)
        hit_counters.increment_youtube();
        return search_youtube_channels(&search_term).await;
    } else {
        // Podcast Index API search
        hit_counters.increment_podcast_index();
        let (api_key, api_secret) = match get_api_credentials() {
            Ok(creds) => creds,
            Err(response) => return response,
        };
        let encoded_search_term = urlencoding::encode(&search_term);
        println!("Encoded search term: {}", encoded_search_term);
        println!("Search type: {}", search_type);
        // "person" searches by people credits; anything else falls back to
        // the ordinary by-term search.
        let podcast_search_url = match search_type.as_str() {
            "person" => {
                println!("Using /search/byperson endpoint");
                format!("https://api.podcastindex.org/api/1.0/search/byperson?q={}", encoded_search_term)
            },
            _ => {
                println!("Using /search/byterm endpoint");
                format!("https://api.podcastindex.org/api/1.0/search/byterm?q={}", encoded_search_term)
            },
        };
        println!("Using Podcast Index search URL: {}", podcast_search_url);
        let headers = match create_auth_headers(&api_key, &api_secret) {
            Ok(h) => h,
            Err(response) => return response,
        };
        println!("Final Podcast Index URL: {}", podcast_search_url);
        client.get(&podcast_search_url).headers(headers).send().await
    };
    handle_response(response).await
}
/// Searches the YouTube Data API v3 for channels matching `search_term`
/// and converts the result into the simplified `YouTubeSearchResult`
/// JSON shape. Requires the `YOUTUBE_API_KEY` environment variable.
async fn search_youtube_channels(search_term: &str) -> HttpResponse {
    println!("Searching YouTube for: {}", search_term);
    let youtube_api_key = match env::var("YOUTUBE_API_KEY") {
        Ok(key) => key,
        Err(_) => {
            error!("YOUTUBE_API_KEY not set in the environment");
            return HttpResponse::InternalServerError().body("YouTube API key not configured");
        }
    };
    let client = reqwest::Client::new();
    let encoded_search_term = urlencoding::encode(search_term);
    // YouTube Data API v3 search for channels
    let youtube_search_url = format!(
        "https://www.googleapis.com/youtube/v3/search?part=snippet&type=channel&q={}&maxResults=25&key={}",
        encoded_search_term, youtube_api_key
    );
    println!("Using YouTube search URL: {}", youtube_search_url);
    match client.get(&youtube_search_url).send().await {
        Ok(resp) => {
            if resp.status().is_success() {
                match resp.json::<YouTubeSearchResponse>().await {
                    Ok(youtube_response) => {
                        // Convert YouTube response to our format
                        let channels: Vec<YouTubeChannel> = youtube_response.items.into_iter().map(|item| {
                            // Largest available thumbnail wins: high, then
                            // medium, then default; empty string if none.
                            let thumbnail_url = item.snippet.thumbnails.high
                                .or(item.snippet.thumbnails.medium)
                                .or(item.snippet.thumbnails.default)
                                .map(|thumb| thumb.url)
                                .unwrap_or_default();
                            YouTubeChannel {
                                channel_id: item.id.channel_id.clone(),
                                name: item.snippet.title,
                                description: item.snippet.description,
                                thumbnail_url,
                                url: format!("https://www.youtube.com/channel/{}", item.id.channel_id),
                            }
                        }).collect();
                        let result = YouTubeSearchResult { results: channels };
                        match serde_json::to_string(&result) {
                            Ok(json_response) => {
                                println!("YouTube search successful, found {} channels", result.results.len());
                                HttpResponse::Ok().content_type("application/json").body(json_response)
                            }
                            Err(e) => {
                                error!("Failed to serialize YouTube response: {}", e);
                                HttpResponse::InternalServerError().body("Failed to process YouTube response")
                            }
                        }
                    }
                    Err(e) => {
                        error!("Failed to parse YouTube API response: {}", e);
                        HttpResponse::InternalServerError().body("Failed to parse YouTube response")
                    }
                }
            } else {
                error!("YouTube API request failed with status: {}", resp.status());
                HttpResponse::InternalServerError().body(format!("YouTube API error: {}", resp.status()))
            }
        }
        Err(e) => {
            error!("YouTube API request error: {}", e);
            HttpResponse::InternalServerError().body("YouTube API request failed")
        }
    }
}
/// GET /api/podcast — looks up a single Podcast Index feed by its id and
/// proxies the upstream JSON back to the caller via `handle_response`.
async fn podcast_handler(
    query: web::Query<PodcastQuery>,
    hit_counters: web::Data<HitCounters>,
) -> impl Responder {
    println!("podcast_handler called");
    hit_counters.increment_podcast_index();

    // Credentials first: no point building a request we cannot sign.
    let (api_key, api_secret) = match get_api_credentials() {
        Ok(creds) => creds,
        Err(response) => return response,
    };

    let feed_url = format!(
        "https://api.podcastindex.org/api/1.0/podcasts/byfeedid?id={}",
        query.id
    );
    println!("Using Podcast Index URL: {}", feed_url);

    let auth_headers = match create_auth_headers(&api_key, &api_secret) {
        Ok(h) => h,
        Err(response) => return response,
    };

    let upstream = reqwest::Client::new()
        .get(&feed_url)
        .headers(auth_headers)
        .send()
        .await;
    handle_response(upstream).await
}
/// GET /api/youtube/channel — fetches a channel's snippet/statistics and
/// its ten most recent videos, combined into one `YouTubeChannelDetails`
/// JSON payload. Requires the `YOUTUBE_API_KEY` environment variable.
///
/// Video-fetch failures are non-fatal: channel details are returned with
/// an empty video list instead.
async fn youtube_channel_handler(
    query: web::Query<YouTubeChannelQuery>,
    hit_counters: web::Data<HitCounters>,
) -> impl Responder {
    println!("youtube_channel_handler called for channel: {}", query.id);
    hit_counters.increment_youtube();
    let youtube_api_key = match env::var("YOUTUBE_API_KEY") {
        Ok(key) => key,
        Err(_) => {
            error!("YOUTUBE_API_KEY not set in the environment");
            return HttpResponse::InternalServerError().body("YouTube API key not configured");
        }
    };
    let client = reqwest::Client::new();
    let channel_id = &query.id;
    // Step 1: Get channel details and statistics
    let channel_details_url = format!(
        "https://www.googleapis.com/youtube/v3/channels?part=snippet,statistics&id={}&key={}",
        channel_id, youtube_api_key
    );
    println!("Fetching channel details: {}", channel_details_url);
    let channel_details = match client.get(&channel_details_url).send().await {
        Ok(resp) => {
            if resp.status().is_success() {
                match resp.json::<YouTubeChannelDetailsResponse>().await {
                    Ok(details) => {
                        if details.items.is_empty() {
                            return HttpResponse::NotFound().body("Channel not found");
                        }
                        // unwrap is safe: emptiness checked just above.
                        details.items.into_iter().next().unwrap()
                    }
                    Err(e) => {
                        error!("Failed to parse channel details: {}", e);
                        return HttpResponse::InternalServerError().body("Failed to parse channel details");
                    }
                }
            } else {
                error!("Channel details request failed with status: {}", resp.status());
                return HttpResponse::InternalServerError().body(format!("YouTube API error: {}", resp.status()));
            }
        }
        Err(e) => {
            error!("Channel details request error: {}", e);
            return HttpResponse::InternalServerError().body("YouTube API request failed");
        }
    };
    // Step 2: Get recent videos from the channel
    let videos_url = format!(
        "https://www.googleapis.com/youtube/v3/search?part=snippet&channelId={}&type=video&order=date&maxResults=10&key={}",
        channel_id, youtube_api_key
    );
    println!("Fetching recent videos: {}", videos_url);
    let videos = match client.get(&videos_url).send().await {
        Ok(resp) => {
            if resp.status().is_success() {
                match resp.json::<YouTubeVideosResponse>().await {
                    Ok(videos_response) => {
                        videos_response.items.into_iter().map(|item| {
                            // Thumbnail preference here is medium first
                            // (the channel search above tries high first).
                            let thumbnail_url = item.snippet.thumbnails.medium
                                .or(item.snippet.thumbnails.high)
                                .or(item.snippet.thumbnails.default)
                                .map(|thumb| thumb.url)
                                .unwrap_or_default();
                            YouTubeVideo {
                                id: item.id.video_id.clone(),
                                title: item.snippet.title,
                                description: item.snippet.description,
                                url: format!("https://www.youtube.com/watch?v={}", item.id.video_id),
                                thumbnail: thumbnail_url,
                                published_at: item.snippet.published_at,
                                duration: item.content_details.and_then(|cd| cd.duration),
                            }
                        }).collect()
                    }
                    Err(e) => {
                        error!("Failed to parse videos response: {}", e);
                        return HttpResponse::InternalServerError().body("Failed to parse videos");
                    }
                }
            } else {
                error!("Videos request failed with status: {}", resp.status());
                // Continue without videos rather than failing completely
                Vec::new()
            }
        }
        Err(e) => {
            error!("Videos request error: {}", e);
            // Continue without videos rather than failing completely
            Vec::new()
        }
    };
    // Extract thumbnail URL from channel details
    let thumbnail_url = channel_details.snippet.thumbnails.high
        .or(channel_details.snippet.thumbnails.medium)
        .or(channel_details.snippet.thumbnails.default)
        .map(|thumb| thumb.url)
        .unwrap_or_default();
    // Parse subscriber and video counts (the API returns these as strings;
    // anything unparsable becomes None rather than an error).
    let subscriber_count = channel_details.statistics.as_ref()
        .and_then(|stats| stats.subscriber_count.as_ref())
        .and_then(|count| count.parse::<i64>().ok());
    let video_count = channel_details.statistics.as_ref()
        .and_then(|stats| stats.video_count.as_ref())
        .and_then(|count| count.parse::<i64>().ok());
    let result = YouTubeChannelDetails {
        channel_id: channel_id.to_string(),
        name: channel_details.snippet.title,
        description: channel_details.snippet.description,
        thumbnail_url,
        url: format!("https://www.youtube.com/channel/{}", channel_id),
        subscriber_count,
        video_count,
        recent_videos: videos,
    };
    match serde_json::to_string(&result) {
        Ok(json_response) => {
            println!("YouTube channel details successful for {}, found {} videos", result.name, result.recent_videos.len());
            HttpResponse::Ok().content_type("application/json").body(json_response)
        }
        Err(e) => {
            error!("Failed to serialize channel details response: {}", e);
            HttpResponse::InternalServerError().body("Failed to process channel details")
        }
    }
}
/// GET /api/stats — reports per-backend API hit counts plus an RFC 3339
/// timestamp of when the snapshot was taken.
async fn stats_handler(hit_counters: web::Data<HitCounters>) -> impl Responder {
    let (itunes_count, index_count, youtube_count) = hit_counters.get_stats();
    let total = itunes_count + index_count + youtube_count;
    let payload = serde_json::json!({
        "api_usage": {
            "itunes_hits": itunes_count,
            "podcast_index_hits": index_count,
            "youtube_hits": youtube_count,
            "total_hits": total
        },
        "timestamp": chrono::Utc::now().to_rfc3339()
    });
    HttpResponse::Ok().content_type("application/json").json(payload)
}
fn get_api_credentials() -> Result<(String, String), HttpResponse> {
let api_key = match env::var("API_KEY") {
Ok(key) => key,
Err(_) => {
println!("API_KEY not set in the environment");
return Err(HttpResponse::InternalServerError().body("API_KEY not set"));
}
};
let api_secret = match env::var("API_SECRET") {
Ok(secret) => secret,
Err(_) => {
println!("API_SECRET not set in the environment");
return Err(HttpResponse::InternalServerError().body("API_SECRET not set"));
}
};
Ok((api_key, api_secret))
}
fn create_auth_headers(api_key: &str, api_secret: &str) -> Result<HeaderMap, HttpResponse> {
let epoch_time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs().to_string();
let data_to_hash = format!("{}{}{}", api_key, api_secret, epoch_time);
let mut hasher = Sha1::new();
hasher.update(data_to_hash.as_bytes());
let sha_1 = format!("{:x}", hasher.finalize());
let mut headers = HeaderMap::new();
headers.insert("X-Auth-Date", HeaderValue::from_str(&epoch_time).unwrap_or_else(|e| {
error!("Failed to insert X-Auth-Date header: {:?}", e);
std::process::exit(1);
}));
headers.insert("X-Auth-Key", HeaderValue::from_str(api_key).unwrap_or_else(|e| {
error!("Failed to insert X-Auth-Key header: {:?}", e);
std::process::exit(1);
}));
headers.insert("Authorization", HeaderValue::from_str(&sha_1).unwrap_or_else(|e| {
error!("Failed to insert Authorization header: {:?}", e);
std::process::exit(1);
}));
headers.insert(USER_AGENT, HeaderValue::from_static("PodPeopleDB/1.0"));
Ok(headers)
}
async fn handle_response(response: Result<reqwest::Response, reqwest::Error>) -> HttpResponse {
match response {
Ok(resp) => {
if resp.status().is_success() {
println!("Request succeeded");
match resp.text().await {
Ok(body) => {
println!("Response body: {:?}", body);
HttpResponse::Ok().content_type("application/json").body(body)
},
Err(_) => {
error!("Failed to parse response body");
HttpResponse::InternalServerError().body("Failed to parse response body")
}
}
} else {
error!("Request failed with status code: {}", resp.status());
println!("Request Headers: {:?}", resp.headers());
HttpResponse::InternalServerError().body(format!("Request failed with status code: {}", resp.status()))
}
}
Err(err) => {
error!("Request error: {:?}", err);
HttpResponse::InternalServerError().body("Request error occurred")
}
}
}
/// Entry point: loads `.env`, initializes logging, and serves the API on
/// 0.0.0.0:5000 with a permissive CORS policy (self-hostable service).
#[actix_web::main]
async fn main() -> std::io::Result<()> {
    dotenv().ok();
    env_logger::init();
    println!("Starting the Actix Web server with yt search");
    // Initialize hit counters (shared across all workers via web::Data)
    let hit_counters = web::Data::new(HitCounters::new());
    HttpServer::new(move || {
        // NOTE(review): any-origin + credentials is maximally permissive —
        // confirm this CORS posture is intended for all deployments.
        let cors = Cors::default()
            .allow_any_origin() // Allow all origins since this is self-hostable
            .allow_any_method() // Allow all HTTP methods
            .allow_any_header() // Allow all headers
            .supports_credentials()
            .max_age(3600); // Cache preflight requests for 1 hour
        App::new()
            .app_data(hit_counters.clone())
            .wrap(cors)
            .route("/api/search", web::get().to(search_handler))
            .route("/api/podcast", web::get().to(podcast_handler))
            .route("/api/youtube/channel", web::get().to(youtube_channel_handler))
            .route("/api/stats", web::get().to(stats_handler))
    })
    .bind("0.0.0.0:5000")?
    .run()
    .await
}

View File

@@ -1,22 +0,0 @@
#!/bin/bash
# Container entrypoint for pinepods_backend.

# Source the environment variables directly from the env_file.
# NOTE(review): /path/to/env_file looks like a placeholder — confirm the
# real mount point of the env file and update this path.
if [ -f /path/to/env_file ]; then
    source /path/to/env_file
fi

# Log whether the credentials are present WITHOUT printing their values
# (the previous version echoed API_SECRET into the container logs).
echo "API_KEY: ${API_KEY:+set}, API_SECRET: ${API_SECRET:+set}"

# Start the Actix web application as PID 1. `exec` replaces this shell, so
# the container exits when the server exits and Docker restart policies can
# take effect. (The previous `tail -f /dev/null` kept the container alive
# even after the application had died, and the post-exit status check never
# ran while the server was serving.)
echo "Starting Actix web application."
exec /usr/local/bin/pinepods_backend

View File

@@ -1,12 +0,0 @@
Welcome to the Pinepods Repository! Thanks for considering contributing to this passion project! Check out the Readme if you haven't for a project overview.
Not a whole lot of rules here. To contribute to this project please simply fork the project and create a pull request when done with detail on what you added. Take a look at the issues for some inspiration. There's quite a few issues in there listed as first time issues that, once you get a hang of the project would be super quick to fix. There's also an issue in there to fill out some documentation in the external documentation repo for some no-code contributions.
There's a dev guide on the doc site to help get you set up:
https://www.pinepods.online/docs/Developing/Developing
The priority right now is getting the app to a full v1 state. If you're looking for something big and exciting, take a look at the YouTube Subscriptions issue; otherwise there are plenty of quick and easy visual fixes or functionality additions: category button improvements, removing the shared episode reference job, the known timezone issue, downloads page visual improvements, etc.
Here's the docs repo: https://github.com/madeofpendletonwool/Pinepods-Docs
There's also Pinepods Firewood, a project I've been working on for a CLI interface to either share podcasts to or browse podcasts on your Pinepods server. Entirely built in Rust!
Here's Pinepods firewood: https://github.com/madeofpendletonwool/pinepods-firewood

View File

@@ -1,28 +0,0 @@
FROM python:3.11-slim

# Build tooling and MySQL/PostgreSQL client prerequisites
RUN apt-get update && apt-get install -y \
    libpq-dev \
    gcc \
    && rm -rf /var/lib/apt/lists/*

# Database drivers and password-hashing libraries used by the validator
RUN pip install psycopg[binary] mysql-connector-python cryptography passlib argon2-cffi

# Validation scripts
COPY database_functions/ /app/database_functions/
COPY validate_db.py /app/

WORKDIR /app

# Default MySQL connection settings (TEST ONLY - NOT SECURE);
# a single ENV instruction keeps these in one layer.
ENV DB_TYPE=mysql \
    DB_HOST=mysql_db \
    DB_PORT=3306 \
    DB_USER=root \
    DB_PASSWORD=test_password_123 \
    DB_NAME=pinepods_database

# Run validator
CMD ["python", "validate_db.py", "--verbose"]

View File

@@ -1,28 +0,0 @@
FROM python:3.11-slim
# Install PostgreSQL dev libraries and required packages
RUN apt-get update && apt-get install -y \
libpq-dev \
gcc \
&& rm -rf /var/lib/apt/lists/*
# Install required packages
RUN pip install psycopg[binary] mysql-connector-python cryptography passlib argon2-cffi
# Copy validation scripts
COPY database_functions/ /app/database_functions/
COPY validate_db.py /app/
# Set working directory
WORKDIR /app
# Set default environment variables for PostgreSQL (TEST ONLY - NOT SECURE)
ENV DB_TYPE=postgresql
ENV DB_HOST=postgres_db
ENV DB_PORT=5432
ENV DB_USER=postgres
ENV DB_PASSWORD=test_password_123
ENV DB_NAME=pinepods_database
# Run validator
CMD ["python", "validate_db.py", "--verbose"]

View File

@@ -1,674 +0,0 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

View File

@@ -1,683 +0,0 @@
<p align="center">
<img width="500" height="500" src="./images/pinepods-logo.jpeg">
</p>
# PinePods :evergreen_tree:
[![Discord](https://img.shields.io/badge/discord-join%20chat-5B5EA6)](https://discord.gg/bKzHRa4GNc)
[![Chat on Matrix](https://matrix.to/img/matrix-badge.svg)](https://matrix.to/#/#pinepods:matrix.org)
[![Docker Container Build](https://github.com/madeofpendletonwool/PinePods/actions/workflows/docker-publish.yml/badge.svg)](https://github.com/madeofpendletonwool/PinePods/actions)
[![GitHub Release](https://img.shields.io/github/v/release/madeofpendletonwool/pinepods)](https://github.com/madeofpendletonwool/PinePods/releases)
---
- [PinePods :evergreen_tree:](#pinepods-evergreen_tree)
- [Getting Started](#getting-started)
- [Features](#features)
- [Try it out! :zap:](#try-it-out-zap)
- [Installing :runner:](#installing-runner)
- [Server Installation :floppy_disk:](#server-installation-floppy_disk)
- [Docker Compose](#docker-compose)
- [Helm Deployment](#helm-deployment)
- [Admin User Info](#admin-user-info)
- [Note on the Search API](#note-on-the-search-api)
- [Timezone Configuration](#timezone-configuration)
- [Start it up!](#start-it-up)
- [Client Installs](#client-installs)
- [Linux Client Install :computer:](#linux-client-install-computer)
- [Windows Client Install :computer:](#windows-client-install-computer)
- [Mac Client Install :computer:](#mac-client-install-computer)
- [Android Install :iphone:](#android-install-iphone)
- [iOS Install :iphone:](#ios-install-iphone)
- [PodPeople DB](#podpeople-db)
- [Pinepods Firewood](#pinepods-firewood)
- [Platform Availability](#platform-availability)
- [ToDo](#todo)
- [Screenshots :camera:](#screenshots-camera)
# Getting Started
PinePods is a Rust based podcast management system that manages podcasts with multi-user support and relies on a central database with clients to connect to it. It's browser based and your podcasts and settings follow you from device to device due to everything being stored on the server. You can subscribe to podcasts and even hosts for podcasts with the help of the PodPeopleDB. It has a native mobile app for iOS and Android and comes prebaked with its own internal gpodder server so you can use external apps like Antennapod as well!
For more information than what's provided in this repo visit the [documentation site](https://www.pinepods.online/).
<p align="center">
<img src="./images/screenshots/homethemed.png">
</p>
## Features
Pinepods is a complete podcast management system and allows you to play, download, and keep track of podcasts you (or any of your users) enjoy. It allows for searching and subscribing to hosts and podcasts using The Podcast Index or Itunes and provides a modern looking UI to browse through shows and episodes. In addition, Pinepods provides simple user management and can be used by multiple users at once using a browser or app version. Everything is saved into a MySQL, MariaDB, or Postgres database including user settings, podcasts and episodes. It's fully self-hosted, open-sourced, and I provide an option to use a hosted search API or you can also get one from the Podcast Index and use your own. There's even many different themes to choose from! Everything is fully dockerized and I provide a simple guide found below explaining how to install and run Pinepods on your own system.
There's plenty more features as well, check out the [Pinepods Site](https://www.pinepods.online/docs/Features/smart-playlists) for more!
## Try it out! :zap:
I maintain an instance of Pinepods that's publicly accessible for testing over at [try.pinepods.online](https://try.pinepods.online). Feel free to make an account there and try it out before making your own server instance. This is not intended as a permanent method of using Pinepods and it's expected you run your own server; accounts will often be deleted from there.
## Installing :runner:
There's potentially a few steps to getting Pinepods fully installed. After you get your server up and running fully you can also install the client editions of your choice. The server install of Pinepods runs a server and a browser client over a port of your choice in order to be accessible on the web. With the client installs you simply give the client your server url to connect to the database and then sign in.
### Server Installation :floppy_disk:
First, the server. You have multiple options for deploying Pinepods:
- [Using Docker Compose :whale:](#docker-compose)
- [Using Helm for Kubernetes :anchor:](#helm-deployment)
You can also choose to use MySQL/MariaDB or Postgres as your database. Examples for both are provided below.
### Docker Compose
> **⚠️ WARNING:** An issue was recently pointed out to me related to postgres version 18. If you run into an error that looks like this on startup when using postgres:
```
Failed to deploy a stack: compose up operation failed: Error response from daemon: failed to create task for container: failed to create shim task: OCI runtime create failed: runc create failed: unable to start container process: error during container init: error mounting "<POSTGRESQL_PATH>" to rootfs at "/var/lib/postgresql/data": change mount propagation through procfd: open o_path procfd: open /<DOCKER ROOT>/overlay2/17561d31d0730b3fd3071752d82cf8fe60b2ea0ed84521c6ee8b06427ca8f064/merged/var/lib/postgresql/data: no such file or directory: unknown`
```
> Please change your postgres tag in your compose to '17'. See [this issue](https://github.com/docker-library/postgres/issues/1363) for more details.
#### User Permissions
Pinepods can run with specific user permissions to ensure downloaded files are accessible on the host system. This is controlled through two environment variables:
- `PUID`: Process User ID (defaults to 1000 if not set)
- `PGID`: Process Group ID (defaults to 1000 if not set)
To find your user's UID and GID, run:
```bash
id -u # Your UID
id -g # Your GID
```
#### Compose File - PostgreSQL (Recommended)
```yaml
services:
db:
container_name: db
image: postgres:17
environment:
POSTGRES_DB: pinepods_database
POSTGRES_USER: postgres
POSTGRES_PASSWORD: myS3curepass
PGDATA: /var/lib/postgresql/data/pgdata
volumes:
- /home/user/pinepods/pgdata:/var/lib/postgresql/data
restart: always
valkey:
image: valkey/valkey:8-alpine
restart: always
pinepods:
image: madeofpendletonwool/pinepods:latest
ports:
- "8040:8040"
environment:
# Basic Server Info
SEARCH_API_URL: 'https://search.pinepods.online/api/search'
PEOPLE_API_URL: 'https://people.pinepods.online'
HOSTNAME: 'http://localhost:8040'
# Database Vars
DB_TYPE: postgresql
DB_HOST: db
DB_PORT: 5432
DB_USER: postgres
DB_PASSWORD: myS3curepass
DB_NAME: pinepods_database
# Valkey Settings
VALKEY_HOST: valkey
VALKEY_PORT: 6379
# Enable or Disable Debug Mode for additional Printing
DEBUG_MODE: false
PUID: ${UID:-911}
PGID: ${GID:-911}
# Add timezone configuration
TZ: "America/New_York"
volumes:
# Mount the download and backup locations on the server
- /home/user/pinepods/downloads:/opt/pinepods/downloads
- /home/user/pinepods/backups:/opt/pinepods/backups
# Timezone volumes, HIGHLY optional. Read the timezone notes below
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
restart: always
depends_on:
- db
- valkey
```
#### Compose File - MariaDB (Alternative)
```yaml
services:
db:
container_name: db
image: mariadb:12
command: --wait_timeout=1800
environment:
MYSQL_TCP_PORT: 3306
MYSQL_ROOT_PASSWORD: myS3curepass
MYSQL_DATABASE: pinepods_database
MYSQL_COLLATION_SERVER: utf8mb4_unicode_ci
MYSQL_CHARACTER_SET_SERVER: utf8mb4
MYSQL_INIT_CONNECT: 'SET @@GLOBAL.max_allowed_packet=64*1024*1024;'
volumes:
- /home/user/pinepods/sql:/var/lib/mysql
restart: always
valkey:
image: valkey/valkey:8-alpine
pinepods:
image: madeofpendletonwool/pinepods:latest
ports:
- "8040:8040"
environment:
# Basic Server Info
SEARCH_API_URL: 'https://search.pinepods.online/api/search'
PEOPLE_API_URL: 'https://people.pinepods.online'
HOSTNAME: 'http://localhost:8040'
# Database Vars
DB_TYPE: mariadb
DB_HOST: db
DB_PORT: 3306
DB_USER: root
DB_PASSWORD: myS3curepass
DB_NAME: pinepods_database
# Valkey Settings
VALKEY_HOST: valkey
VALKEY_PORT: 6379
# Enable or Disable Debug Mode for additional Printing
DEBUG_MODE: false
PUID: ${UID:-911}
PGID: ${GID:-911}
# Add timezone configuration
TZ: "America/New_York"
volumes:
# Mount the download and backup locations on the server
- /home/user/pinepods/downloads:/opt/pinepods/downloads
- /home/user/pinepods/backups:/opt/pinepods/backups
# Timezone volumes, HIGHLY optional. Read the timezone notes below
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
depends_on:
- db
- valkey
```
Make sure you change these variables to variables specific to yourself at a minimum.
```
# The url you hit the site at. Only used for sharing rss feeds
HOSTNAME: 'http://localhost:8040'
# These next 4 are optional. They allow you to set an admin without setting on the first boot
USERNAME: pinepods
PASSWORD: password
FULLNAME: John Pinepods
EMAIL: john@pinepods.com
# DB vars should match your values for the db you set up above
DB_TYPE: postgresql
DB_HOST: db
DB_PORT: 5432
DB_USER: postgres
DB_PASSWORD: myS3curepass
DB_NAME: pinepods_database
```
Most of those are pretty obvious, but let's break a couple of them down.
#### Admin User Info
First of all, the USERNAME, PASSWORD, FULLNAME, and EMAIL vars are your details for your default admin account. This account will have admin credentials and will be able to log in right when you start up the app. Once started you'll be able to create more users and even more admins but you need an account to kick things off on. If you don't specify credentials in the compose file it will prompt you to create an account before first login.
#### Note on the Search API
Let's talk quickly about the searching API. This allows you to search for new podcasts and it queries either itunes or the podcast index for new podcasts. It also allows for searching youtube channels via the Google Search API. The podcast index and Google Search require an api key while itunes does not. If you'd rather not mess with the api at all simply set the API_URL to the one below, however, know that Google implements a limit per day on youtube searches and the search api that I maintain below hits its limit pretty quick. So if you're a big youtube user you might want to host your own.
```
SEARCH_API_URL: 'https://search.pinepods.online/api/search'
```
Above is an api that I maintain. I do not guarantee 100% uptime on this api though, it should be up most of the time besides a random internet or power outage here or there. A better idea though, and what I would honestly recommend is to maintain your own api. It's super easy. Check out the API docs for more information on doing this. Link Below -
https://www.pinepods.online/docs/API/search_api
#### Timezone Configuration
PinePods supports displaying timestamps in your local timezone instead of UTC. This helps improve readability and prevents confusion when viewing timestamps such as "last sync" times in the gpodder API. Note that this configuration is specifically for logs. Each user sets their own timezone settings on first login. That is separate from this server timezone config.
##### Setting the Timezone
You have two main options for configuring the timezone in PinePods:
##### Option 1: Using the TZ Environment Variable (Recommended)
Add the `TZ` environment variable to your docker-compose.yml file:
```yaml
services:
pinepods:
image: madeofpendletonwool/pinepods:latest
environment:
# Other environment variables...
TZ: "America/Chicago" # Set your preferred timezone
```
This method works consistently across all operating systems (Linux, macOS, Windows) and is the recommended approach.
##### Option 2: Mounting Host Timezone Files (Linux Only)
On Linux systems, you can mount the host's timezone files:
```yaml
services:
pinepods:
image: madeofpendletonwool/pinepods:latest
volumes:
# Other volumes...
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
```
**Note**: This method only works reliably on Linux hosts. For macOS and Windows users, please use the TZ environment variable (Option 1).
##### Priority
If both methods are used:
1. The TZ environment variable takes precedence
2. Mounted timezone files are used as a fallback
##### Common Timezone Values
Here are some common timezone identifiers:
- `America/New_York` - Eastern Time
- `America/Chicago` - Central Time
- `America/Denver` - Mountain Time
- `America/Los_Angeles` - Pacific Time
- `Europe/London` - United Kingdom
- `Europe/Berlin` - Central Europe
- `Asia/Tokyo` - Japan
- `Australia/Sydney` - Australia Eastern
For a complete list of valid timezone identifiers, see the [IANA Time Zone Database](https://www.iana.org/time-zones).
##### Troubleshooting Timezones
**I'm on macOS and timezone settings aren't working**
macOS uses a different timezone file format than Linux. You must use the TZ environment variable method on macOS.
#### Start it up!
Either way, once you have everything all setup and your compose file created go ahead and run
```
sudo docker-compose up
```
To pull the container images and get started. Once fully started up you'll be able to access pinepods at the port you configured and you'll be able to start connecting clients as well.
### Helm Deployment
Alternatively, you can deploy Pinepods using Helm on a Kubernetes cluster. Helm is a package manager for Kubernetes that simplifies deployment.
#### Adding the Helm Repository
First, add the Pinepods Helm repository:
```bash
helm repo add pinepods http://helm.pinepods.online
helm repo update
```
#### Installing the Chart
To install the Pinepods Helm chart with default values:
```bash
helm install pinepods pinepods/pinepods --namespace pinepods-namespace --create-namespace
```
Or with custom values:
```bash
helm install pinepods pinepods/pinepods -f my-values.yaml --namespace pinepods-namespace --create-namespace
```
#### Configuration Options
The Helm chart supports extensive configuration. Key areas include:
**Main Application:**
- Image repository and tag configuration
- Service type and port settings
- Ingress configuration with TLS support
- Persistent storage for downloads and backups
- Resource limits and requests
- Security contexts and pod placement
**Dependencies:**
- PostgreSQL database (can be disabled for external database)
- Valkey/Redis for caching (can be disabled)
- Optional backend API deployment for self-hosted search
- Optional PodPeople database for podcast host information
**Example values.yaml:**
```yaml
# Main application configuration
image:
repository: madeofpendletonwool/pinepods
tag: latest
pullPolicy: IfNotPresent
service:
type: ClusterIP
port: 8040
ingress:
enabled: true
className: ""
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: web
hosts:
- host: pinepods.example.com
paths:
- path: /
pathType: Prefix
tls: []
# Persistent storage
persistence:
enabled: true
downloads:
storageClass: "" # Use default storage class
size: 5Gi
backups:
storageClass: ""
size: 2Gi
# Database configuration
postgresql:
enabled: true
auth:
username: postgres
password: "changeme"
database: pinepods_database
persistence:
enabled: true
size: 3Gi
# Valkey/Redis configuration
valkey:
enabled: true
architecture: standalone
auth:
enabled: false
# Optional backend API (self-hosted search)
backend:
enabled: false
secrets:
apiKey: "YOUR_PODCAST_INDEX_KEY"
apiSecret: "YOUR_PODCAST_INDEX_SECRET"
# Optional PodPeople database
podpeople:
enabled: false
# Application environment
env:
USERNAME: "admin"
PASSWORD: "password"
FULLNAME: "Admin User"
EMAIL: "admin@example.com"
DEBUG_MODE: "false"
HOSTNAME: 'http://localhost:8040'
```
#### External Database Configuration
To use an external database instead of the included PostgreSQL:
```yaml
postgresql:
enabled: false
externalDatabase:
host: "your-postgres-host"
port: 5432
user: postgres
password: "your-password"
database: pinepods_database
```
#### Create a Namespace for Pinepods
Create a namespace to hold the deployment:
```bash
kubectl create namespace pinepods-namespace
```
#### Starting Helm
Once you have everything set up, install the Helm chart:
```bash
helm install pinepods pinepods/pinepods -f my-values.yaml --namespace pinepods-namespace --create-namespace
```
This will deploy Pinepods on your Kubernetes cluster with a postgres database. MySQL/MariaDB is not supported with the kubernetes setup. The service will be accessible at the specified NodePort.
Check out the Tutorials on the documentation site for more information on how to do basic things:
https://pinepods.online/tutorial-basic/sign-in-homescreen.md
## Client Installs
Any of the client additions are super easy to get going.
### Linux Client Install :computer:
#### AppImage, Fedora/Red Hat Derivative/Debian based (Ubuntu)
First head over to the releases page on Github
https://github.com/madeofpendletonwool/PinePods/releases
Grab the latest linux release. There's an appimage, a deb, and an rpm. Use the appimage of course if you aren't using a debian or red hat based distro. Change the permissions if using the appimage version to allow it to run.
```
sudo chmod +x pinepods.appimage
```
^ The name of the app file will vary slightly based on the version so be sure you change it or it won't work.
For the rpm or deb version just run and install
Once started you'll be able to sign in with your username and password. The server name is simply the url you browse to to access the server.
#### Arch Linux (AUR)
Install the Pinepods Client right from the AUR! Replace the command below with your favorite aur helper
```
paru -S pinepods
```
#### Flatpak
<a href="https://flathub.org/en/apps/com.gooseberrydevelopment.pinepods">
<img src="https://flathub.org/api/badge?locale=en" alt="Get it on Flathub" width="240">
</a>
You can search for Pinepods in your favorite flatpak installer gui app such as Gnome Software.
Flathub page can be found [here](https://flathub.org/apps/com.gooseberrydevelopment.pinepods)
```
flatpak install flathub com.gooseberrydevelopment.pinepods
```
#### Snap
I have had such a nightmare trying to make the snap client work. Pass, use the flatpak. They're better anyway. I'll test it again in the future and see if Canonical has gotten it together. If you really want a snap version of the client please reach out and tell me you're interested in the first place.
### Windows Client Install :computer:
First head over to the releases page on Github
https://github.com/madeofpendletonwool/PinePods/releases
There's an exe and an msi windows install file.
The exe will actually start an install window and allow you to properly install the program to your computer.
The msi will simply run a portable version of the app.
Either one does the same thing ultimately and will work just fine.
Once started you'll be able to sign in with your username and password. The server name is simply the url you browse to to access the server.
### Mac Client Install :computer:
First head over to the releases page on Github
https://github.com/madeofpendletonwool/PinePods/releases
There's a dmg and pinepods_mac file.
Simply extract, and then go into Contents/MacOS. From there you can run the app.
The dmg file will prompt you to install the Pinepods client into your applications folder while the _mac file will just run a portable version of the app.
Once started you'll be able to sign in with your username and password. The server name is simply the url you browse to to access the server.
### Android Install :iphone:
<a href="https://apt.izzysoft.de/fdroid/index/apk/com.gooseberrydevelopment.pinepods">
<img src="https://gitlab.com/IzzyOnDroid/repo/-/raw/master/assets/IzzyOnDroid.png" alt="Get it on IzzyOnDroid" width="200">
</a>
<a href="https://apps.obtainium.imranr.dev/redirect?r=obtainium://app/%7B%22id%22%3A%22com.gooseberrydevelopment.pinepods%22%2C%22url%22%3A%22https%3A//github.com/madeofpendletonwool/PinePods%22%2C%22author%22%3A%22madeofpendletonwool%22%2C%22name%22%3A%22PinePods%22%2C%22installerUrl%22%3A%22https%3A//github.com/madeofpendletonwool/PinePods/releases/latest%22%7D">
<img src="./images/badge_obtainium.png" alt="Get it on Obtainium" width="200">
</a>
Currently there's options for direct downloads and Pinepods is on the IzzyOnDroid storefront! More locations coming soon!
### iOS Install :iphone:
<a href="https://apps.apple.com/us/app/pinepods/id6751441116">
<img src="./images/Download_on_the_App_Store_Badge_US-UK_RGB_blk_092917.svg" alt="Download on the App Store" width="200">
</a>
The iOS app has arrived! Enjoy!
## PodPeople DB
Podpeople DB is a project that I maintain and also develop. Podpeople DB is a way to supplement Person tags for podcasts that don't support them by default. This allows the community to maintain hosts and follow them to all podcasts! I maintain an instance of Podpeople DB at podpeopledb.com. Otherwise, it's an open source project and you can maintain an instance of your own if you prefer. For information on that go [here](https://podpeopledb.com/docs/self-host). You can download the database yourself and maintain your own instance. If you do decide to go this route please still add any hosts for your favorite podcasts at the instance hosted at podpeopledb.com. The community will thank you!
For additional info on Podpeople DB check out [the docs](https://podpeopledb.com/docs/what-is-this-for).
Additionally, I've written [a blog](https://www.pinepods.online/blog) post discussing the rationale around its creation.
Finally, you can check out the Repo for it [here!](https://github.com/madeofpendletonwool/podpeople-db)
## Pinepods Firewood
A CLI-only client that can be used to remotely play your podcasts has had its first release! Now you can enjoy podcasts from the comfort of your terminal! Check out [Pinepods Firewood!](https://github.com/madeofpendletonwool/pinepods-firewood)
## Platform Availability
The Intention is for this app to become available on Windows, Linux, Mac, Android, and iOS. Windows, Linux, Mac, web, and android are all currently available and working.
ARM devices are also supported including raspberry pis. The app is shockingly performant on a raspberry pi as well. The only limitation is that a 64bit OS is required on an ARM device. Setup is exactly the same, just use the latest tag and docker will auto pull the ARM version.
### Clients to support
- [x] Flatpak Package
- [ ] Nix Package
- [x] Aur Package
- [x] Helm Chart and repo for kubernetes deployment
- [x] Mobile Apps
- [x] Android App - Beta
- [ ] Android Auto support
- [x] iOS App
- [ ] Packaging and automation
## Screenshots :camera:
Main Homepage with podcasts displayed
<p align="center">
<img width="800" src="./images/screenshots/homethemed.png">
</p>
Loads of themes!
<p align="center">
<img width="600" src="./images/screenshots/home.png">
</p>
<p align="center">
<img width="600" src="./images/screenshots/homelight.png">
</p>
<p align="center">
<img width="600" src="./images/screenshots/homegreen.png">
</p>
Full Podcast Management
<p align="center">
<img width="800" src="./images/screenshots/podpage.png">
</p>
Browse through episodes
<p align="center">
<img width="800" src="./images/screenshots/podview.png">
</p>
Markdown and HTML display compatible
<p align="center">
<img width="800" src="./images/screenshots/markdownview.png">
</p>
Mobile support baked right in!
<p align="center">
<img width="300" src="./images/screenshots/mobile.png">
</p>
<p align="center">
<img width="300" src="./images/screenshots/mobileepisode.png">
</p>
#### Runners
ARM Images made possible by Runs-On:
https://runs-on.com
#### 📜 Credits & Licensing
PinePods is an open-source podcast player developed by Gooseberry Development, licensed under the GNU General Public License v3.0 (GPL-3.0).
The Pinepods Mobile app in the mobile directory includes code adapted from the excellent [Anytime Podcast Player](https://github.com/amugofjava/anytime_podcast_player), originally created by Ben Hills.
#### 🧩 Included Third-Party Code
**Anytime Podcast Player**
© 2020 Ben Hills and project contributors
Licensed under the BSD 3-Clause License
Portions of the mobile app retain the original BSD license and attribution as required. Files with this license are labeled at the top to clearly indicate. See the LICENSE.ben_hills in the mobile directory for details.
#### 💬 Acknowledgment
Huge thanks to Ben Hills for open-sourcing the Anytime Podcast Player. It served as a solid foundation and greatly accelerated development of PinePods.
#### 🌐 Translation
Translations are managed through [Weblate](https://hosted.weblate.org), a web-based translation tool that makes it easy for the community to contribute translations. If you'd like to help translate PinePods into your language, please visit our Weblate project and join the translation effort!

View File

@@ -1,24 +0,0 @@
pkgbase = pinepods
pkgdesc = Pinepods is a complete podcast management system and allows you to play, download, and keep track of podcasts you enjoy. All self hosted and enjoyed on your own server!
pkgver = 0.7.0
pkgrel = 1
url = https://github.com/madeofpendletonwool/PinePods
install = pinepods.install
arch = x86_64
arch = aarch64
license = gpl3
depends = cairo
depends = desktop-file-utils
depends = gdk-pixbuf2
depends = glib2
depends = gtk3
depends = hicolor-icon-theme
depends = libsoup
depends = pango
depends = webkit2gtk
options = !strip
options = !emptydirs
source_x86_64 = https://github.com/madeofpendletonwool/PinePods/releases/download/0.7.0/Pinepods_0.7.0_amd64.deb
source_aarch64 = https://github.com/madeofpendletonwool/PinePods/releases/download/0.7.0/Pinepods_0.7.0_arm64.deb
pkgname = pinepods

View File

@@ -1,14 +0,0 @@
pkgname=pinepods
pkgver=0.6.6
pkgrel=1
pkgdesc="Pinepods is a complete podcast management system and allows you to play, download, and keep track of podcasts you enjoy. All self hosted and enjoyed on your own server!"
arch=('x86_64' 'aarch64')
url="https://github.com/madeofpendletonwool/PinePods"
license=('gpl3')
depends=('cairo' 'desktop-file-utils' 'gdk-pixbuf2' 'glib2' 'gtk3' 'hicolor-icon-theme' 'libsoup' 'pango' 'webkit2gtk')
options=('!strip' '!emptydirs')
install=${pkgname}.install
source_x86_64=("https://github.com/madeofpendletonwool/PinePods/releases/download/$pkgver/Pinepods_"$pkgver"_amd64.deb")
source_aarch64=("https://github.com/madeofpendletonwool/PinePods/releases/download/$pkgver/Pinepods_"$pkgver"_arm64.deb")
sha256sums_x86_64=('SKIP')
sha256sums_aarch64=('SKIP')

View File

@@ -1,680 +0,0 @@
# Completed todos
This is the list of previous todos that are now completed
Major Version:
- [ ] iOS App
- [ ] Make sure youtube entirely works on playlists
- [ ] Make sure youtube entirely works on homepage
- [ ] Fix Virtual Line Spacing on Playlist Page
- [ ] Update /home/collinp/Documents/github/PinePods/web/src-tauri/com.gooseberrydevelopment.pinepods.metainfo.xml file along with flatpak automation. This must be done on each release
- [ ] Fix episode spacing on queue page. The context button still shows even on smallest screens
- [ ] Check youtube download Issues when changing the download time
0.8.2
- [x] Translations on the web app
- [x] Account Settings now updates dropdowns with pre-populated values
- [x] episode-layout (podcast page) will now set sort settings based on pod id
- [x] Added endpoint to delete OIDC settings
- [x] Added endpoint to Edit OIDC settings
- [x] Manually search or enter podcast index id for matching to podcast index
- [x] OIDC Setup on start
- [x] Better errors if needed vars are missing
- [x] Redis/Valkey Authentication
- [x] Move Episode Addition process to the background when adding a podcast
- [x] Support HTTP request notifications. Will work with Telegram and quite a few other basic http notification platforms
- [x] Podcast Merge Options
- [x] Individual Episode download on /episode page
- [x] Option to use Podcast covers if desired
- [x] Fix issue where release date on podcasts not added shows as current date/time
- [x] Fix yt-dlp issues
- [x] Gpodder Completion Set Bug where if episode played length was exactly the length of the podcast episode it wouldn't mark complete
- [x] Fixed issue with auto complete threshold. Will now mark historical episodes complete when enabled
- [x] Some sort of loading indicator for the single ep download
- [x] Fix issue where duplicate episodes were created if details of the episode were updated
- [x] Fully dynamic Playlist implementation
- [x] Checking on rss feeds returning downloaded urls correctly
0.7.9
- [x] Finish implementing long finger press - fix on iOS (close, it doesn't auto close when clicking away currently)
- [x] Finish making UI css adjustments
- [x] Fix error where refreshing on episode layout page causes panic
- [x] Issue with rss caused by new migration system
- [x] user stats gpodder sync css fix
- [x] Fix playback speed setting css
- [x] test ntfy sending on nightly
- [x] Test everything in mysql
- [x] Test everything in postgres
- [x] Test upgrades from previous in postgres
- [x] Test upgrades from previous in mysql
- [x] Test fresh postgres
- [x] Test fresh mysql
- [x] retest rss in nightly
- [x] Package upgrades
- [ ] Local downloads tauri are broken again
- [x] Fix downloads Layout
- [x] Finish super small screen visual Improvements
- [x] Return Gpodder info as part of get_stats
- [x] Allow for custom server Timezone
- [x] display gpodder info on the user stats page
- [x] 100 RSS feed limit
- [x] Add unique RSS feed keys to generated feeds
- [x] Updated youtube search results page to be similar to new pod results page
- [x] Improved search dropdown to be more compatible with more devices, also improved style
- [x] Added container time zone options
- [x] Finish playback speed Settings
- [x] Fix issue with the numbers auto updating
- [x] Playing works but results in really strange decimals
- [x] Fix known bugs with gpodder sync
- [x] Changed youtube search view to match podcast search view
- [x] Check opml import issues
- [x] Fixed issues with helm chart
- [x] Rebuilt db migration system to be far more reliable
0.7.8
- [x] External gpodder api message shows internal gpodder message
- [x] User refresh now shows refresh status in notification center
- [x] Potential home page issue on tauri app
- [x] Local download issue tauri app
- [x] Freaking caching
- [x] Fix spacing of play button on shared episodes page
- [x] Issue with client builds
- [x] Finish validating every call
- [x] When YT video is added we need to increment episode count
- [x] validate external pod sync platforms again
- [x] Validate YT feed deletion
- [x] The below error happens on Honoring Juneteenth from Short Wave. Seems to happen when there's an episode that goes longer than the expected possible length
- [x] Add youtube feed retention time setting onto settings for each pod
- [x] Finish custom pod notifications
- [x] Validate that mysql and postgres upgrade correctly
- [x] Weirdly different color trash can on podcast page
- [x] gpodder pod deletions on local
- [x] Fixed issue with time created by timestamps
- [x] Fix up warnings
- [x] Fixed up issue with saved search, and queue pages not showing saved and queued status correct in context button sometimes
- [x] Pinepods news feed not adding at all
- [x] episode count is being doubled
- [x] Show youtube feed cutoff only on youtube channels - It should also show a notification when updated
pinepods-1 | Error creating GPodder tables: 1061 (42000): Duplicate key name 'idx_gpodder_devices_userid'
pinepods-1 | Error setting up platlists: 1061 (42000): Duplicate key name 'idx_playlists_userid'
- [x] ^ On mariadb startup
- [x] postgres pod removals while pod sync enabled
0.7.6
- [x] Add ability to delete playlists
- [x] Notification system
- [x] Ability to delete nextcloud
- [x] Finalize OIDC errors
- [x] Fix context menu on downloads page
- [x] adjust login screen component to be set amount down
- [x] Implement download_youtube_video_task
- [x] Fix specific issue with playlist creation
- [x] mysql tests
- [x] Go from 0.7.3 to 0.7.5 check startpage
- [x] Clean warnings
- [x] Check tauri
- [x] Update packages
- [ ] Automation implements correct SHA in the deb files
- [ ] release
- [ ] flatpak
Pre 0.7.4
- [x] Implement specific podcasts to pass to playlist creation. So you can choose specific ones
- [x] Make the create playlist function work
- [x] On deletion of a podcast delete any references to it in the playlist content func
- [x] Run a playlist refresh after adding a podcast, deleting a podcast, and any other time that makes sense. Maybe even on a standard refresh of podcasts?
- [x] Make states work on homepage. Saved or not, current progress, completed etc.
- [x] Make Podcast tiles Adapt better to large screens
- [x] Make podcast downloading not stop the server from functioning
- [x] Fixed an issue where sometimes chapters didn't load due to incorrect headers
- [x] Recent Episodes on homepage is not correct
- [x] All of mysql
- [x] Check almost done and currently listening playlists
- [x] Add user doesn't work on MYSQL
- [x] Upgrade from 0.7.3 to 0.7.4 works both postgres and mysql
- [x] Notifications in mysql
- [x] Validate Builds with tauri
- [x] Upgrade packages
- [ ] Build flatpak and ensure version bump
- [x] Adjusted Downloads page so that podcast headers take up less space
- [x] Ensure configured start page is navigated to
- [x] Ensure OIDC Logins work
- [x] Ensure Github logins work
- [x] Ensure Google Logins work
- [x] OIDC Logins
- [x] Smart Playlists
- [x] New Homepage Component
- [x] Experimental finger hold context button homepage
- [x] Configurable start page
- [x] Fixed issue where sometimes it was possible for images to not load for episodes and podcasts
- [x] Image Caching
- [x] Added fallback options for when podcast images fail to load. They will now route through the server if needed
- [x] Fixed filter button size consistency on Podcast Page
- [x] Additional filtering on Podcast page for incomplete and/or complete episodes
Next Minor Version:
- [ ] Ensure even when a podcast is clicked via the search page it still loads all the podcast db context
- [ ] Allow user to adjust amount of time to save/download youtube videos
- [ ] After adding podcast we no longer show dumpster
- [ ] Bad url while no channels added youtube
Version 0.7.3
- [x] Youtube Subscriptions
- [x] Fix refreshing so it handles youtube subscriptions
- [x] Thumbnails for youtube episodes currently are just sections of the video
- [x] Validate some more channel adds
- [x] Speed up channel add process by only checking recent videos up to 30 days.
- [x] When searching channels show more recent vids than just one
- [x] Dynamic updating youtube channels
- [x] Delete youtube subs
- [x] Ensure youtube videos update completion/listen time status correctly
- [x] check refreshing on episode/other youtube related pages
- [x] Make /episode page work with youtube
- [x] Allowed and documented option to download episodes as specific user on host machine
- [x] Nextcloud Sync Fixed
- [x] Episode Completion Status is now pushed to Nextcloud/Gpodder
- [x] Adjusted Downloaded Episode titles to be more descriptive - Also added metadata
- [x] Fixed issue with news feed adding
- [x] Additional Podcast parsing when things are missing
- [x] Add pinepods news feed to any admin rather than hard id of 2
- [x] Fix recent episodes so it handles incompletes better
- [x] Check mark episode complete on episode page
- [x] Uncomplete/complete - and in prog episode sorting on episode_layout page
- [x] Add completed icon and in prog info to episodes on episode_layout page
- [x] Check for and fix issues with refreshing again on every page
- [x] Fix issue with episodes page opening when clicking show notes while on episodes page already
- [x] Fix issues with ability to open episode_layout page from episode page. That includes whether the podcast is added or not
- [x] Add podcastindexid to episode page url vars - Then pass to dynamic func call
- [x] Validate Mysql functions
- [x] Build clients and verify
- [x] Sometimes episodes are not even close to newest or right order in episode_layout
- [x] Think the weird yt double refreshing after search is messing up which one is subbed to
- [x] Queuing yt ep also queues standard pod counterpart id
Version 0.7.2
- [x] Mobile Progress line (little line that appears above the line player to indicate your progress in the episode)
- [x] Dynamically Adjusting chapters. Chapters now adapt and update as you play each one
- [x] Dynamic Play button. This means when you play an episode it will update to a pause button as you see it in a list of other episodes
- [x] Fixed issue where Gpodder wasn't adding in podcasts right away after being connected.
- [x] Fixed issues with admin user add component where you could adjust user settings
- [x] Also adjusted error messages on user component so that it's more clear what went wrong
- [x] Added in RSS feed capability. There's a new setting to turn on RSS feeds in the user settings. This will allow you to get a feed of all your Pinepods podcasts that you can add into another podcast app.
- [x] Individual Podcasts can also be subscribed to with feeds as well. Opening the individual Podcast page there's a new RSS icon you can click to get the feed
- [x] Fixed issues where theme wasn't staying applied sometimes
- [x] Added filtering throughout the app. You can now selectively filter whether podcasts is completed or in progress
- [x] Added quick search in numerous places. This allows you to quickly search for a podcast based on the name. Pages like History, Saved, Podcast have all gotten this
- [x] Added Sorting throughout the app. You can now sort podcasts in numerous ways, such as a-z, longest to shortest, newest to oldest, etc...
- [x] Fixed issue where images in descriptions could break the layout of episodes
- [x] Adjusted categories to look nicer in the podcast page
- [x] Fixed issues with DB backup options
- [x] Implemented DB restore options
- [x] Fixed issue where the Queue on mobile wasn't adjusting episode placement
Version 0.7.0
- [x] Android App
- [x] Flatpak Client
- [x] Snap Client
- [x] aur client
- [x] Added Valkey to make many processes faster
- [x] People Table with background jobs to update people found in podcasts
- [x] Subscribe to people
- [x] Add loading spinner when adding podcast via people page
- [x] Four new themes added
- [x] People page dropdowns on podcasts and episodes
- [x] Stop issues with timeouts on occasion with mobile apps - Potentially fixed due to audio file caching. Testing needed
- [x] Virtual Lines implemented for Home and Episode Layout. This will improve performance on those pages greatly
- [x] Dynamically adjusting buttons on episode page
- [x] PodcastPeople DB up and running and can be contributed to
- [x] Show currently updating podcast in refresh feed button at top of screen
- [x] Fixed up remaining issues with user podcast refresh
- [x] Podcast 3x layout
- [x] Finalize loading states so you don't see login page when you are already authenticated
- [x] Using valkey to ensure stateless opml imports
- [x] Android play/pause episode metadata
- [x] Draggable Queues on Mobile Devices
- [x] Make Chapters much nicer. Nice modern look to them
- [x] Add background task to remove shared episode references in db after 60 days
- [x] Dynamically adjusting Download, Queue, and Saved Episodes so that every page can add or remove from these lists
- [x] Fixed issue where some episodes weren't adding when refreshing due to redirects
- [x] Some pods not loading in from opml import - better opml validation. Say number importing. - OPML imports moved to backend to get pod values, also reporting function created to update status
- [x] Update queue slider to be centered
- [x] People don't clear out of hosts and people dropdowns if a podcast doesn't have people. So it shows the old podcast currently
- [x] div .title on audio player is now a link, not selectable text.
- [x] Improved the playback and volume dropdowns so they don't interact with the rest of the page now
- [x] Added some box shadow to the episode image in the full screen player
- [x] When playing an episode <- and -> arrow keys skips forward and back for the playback now
- [x] Layout improved all over the place
- [x] Phosphor icons implemented as opposed to material
- [x] Settings page layout rebuilt
- [x] Better handle description html formatting
Version 0.6.6
- [x] Manually adjust tags for podcast in podcast settings
- [x] Dynamically refresh tags on ep-layout when adding and removing them
- [x] Removed see more button from the episodes_layout, queue, and downloads page
- [x] Added a People page so that you can see other episodes and podcasts a particular person has been on
- [x] Speed up people page loading (happens in async now)
- [x] Add loading component to people page loading process
- [x] Added category filtering to podcasts page
- [x] Link Sharing to a podcast to share and allow people to listen to that episode on the server without logging in
- [x] Update api key creation and deletion after change dynamically with use_effect
- [x] Update mfa setup slider after setup dynamically with use_effect
- [x] Fixed refreshing on episode screen so it no longer breaks the session
- [x] Fixed refreshing on episode-layout screen so it no longer breaks the session
- [x] Fixed issue with episode page where refreshing caused it to break
- [x] Fixed issue with queue where episode couldn't be manually removed
- [x] Added loading spinner when opening an episode to ensure you don't momentarily see the wrong episode
- [x] Improve Filtering css so that things align correctly
- [x] Made the button to add and remove podcasts more consistent (Sometimes it was just not registering)
- [x] Upgraded pulldown-cmark library
- [x] Upgraded python mysql-connection library to 9
- [x] Upgraded chrono-tz rust library
- [x] mac version attached like this:
- [x] Update Rust dependencies
CI/CD:
- [x] mac version attached like this:
dmg.Pinepods_0.6.5_aarch64.dmg - Also second mac archive build failed
- [x] Fix the archived builds for linux. Which are huge because we include a ton of appimage info
- [x] Add in x64 mac releases
- [x] Build in arm cross compile into ubuntu build
Version 0.6.5
- [x] Fixed issue with Podcasts page not refreshing correctly
- [x] Added Add Custom Feed to Podcasts page
- [x] Allow for podcast feeds with user and pass
- [x] Add option to add podcast from feed on podcasts page
- [x] Ensure podcast loads onto podcast page when adding a new custom one in
- [x] Adjusted buttons on episode layout page so they dynamically adjust position to fit better
- [x] Option for user to manually update feeds
- [x] Update Feed directly after adding a Nextcloud/gpodder sync server instead of waiting for the next refresh
- [x] Fixed issue with episode refreshing where a panic could occur (This was due to the categories list)
- [x] Ensured See More Button only shows when needed (Just made the descriptions clickable)
- [x] Fixed issue with context for podcasts not dynamically updating on the episode layout page once the podcast was added to the db
- [x] Fixed issue with nextcloud sync on mysql dbs
- [x] Fixed issue with db setup with mysql
- [x] Ensured deleting podcast when on the episode layout page it closes the deleted modal
Version 0.6.4
- [x] Added a fallback to the opml import for when the opml file uses text instead of title for the podcast name key
- [x] Added a new route for the version tag that dynamically updates when the application is compiled. This allows for automation around the version numbers all based around the GitHub release tag as the original source of truth.
- [x] Fixed layout for podcasts when searching
- [x] Support floating point chapters
- [x] Fixed issue with white space at the bottom of every page #229
- [x] Cleaned up incorrect or not needed logging at startup #219
- [x] Fixed issue with user stats page where it would lose user context on reload #135
- [x] Fixed issue with settings page where it would lose user context on reload #134
- [x] Fixed issue with episode_layout page where it would lose user context on reload and also made podcasts sharable via link #213
- [x] Fixed issue where podcast episode counts wouldn't increment after initial add to the db
- [x] Upgraded gloo::net to 0.6.0
- [x] Upgraded openssl in src-tauri to 0.10.66
- [x] Upgraded a few other rust depends to next minor version
- [x] Added loading spinner to custom feed and implemented more clear success message
- [x] Fixed postgres return issue on user_stats route
- [x] Fixed postgres return issue on mfa return route
- [x] Fixed delete api key route for postgres
- [x] Implemented adjustment on all modals throughout the app so clicking outside them closes them (episode layout confirmations missing yet - also test all login modals)
- [x] Implemented adjustment on all modals so that they overlap everything in the app (This was causing issues on small screens)
- [x] Added Confirmation dialog modal to podcast deletion on /podcasts layout page
- [x] Changed name of bt user to background_tasks to make the user more clear on api key settings display
Version 0.6.3
- [x] Jump to clicked timestamp
- [x] Full Chapter Support (Support for floating points needed yet)
- [x] Chapter Image Support
- [x] Basic Support for People Tags (Host and Guest)
- [x] Support for Funding Tags
- [x] Draggable Queue placement
- [x] Fixed issue with self service user creation when using a postgres db
- [x] Rebuilt the Podcast Episode Layout display page so that on small screens everything fits on screen and looks much nicer
- [x] Rebuilt the Single Episode display page so that on small screens everything fits on screen and looks much nicer
- [x] Fixed Issue with Episodes on small screens where if a word in the title was long enough it would overflow the container
- [x] Adjusted the Podcast Episode Layout display page so that you can click an episode title and view the description
- [x] Removed Unneeded space between First episode/podcast container and the title bar at the top on multiple pages - Just cleans things up a bit
- [x] Fixed image layout issue where if episode had wide image it would overflow the container and title text
- [x] Fixed issue with categories where it showed them as part of a dictionary and sometimes didn't show them at all
- [x] Added verification before downloading all episodes since this is quite a weighty process
- [x] Added Complete Episode Option to Episode Page
- [x] Adjusted downloads page to display the number of downloaded episodes instead of the number of episodes in the podcast
- [x] Added Episode Completion Status to Episode Page
- [x] Fixed Issue with Postgres DBs where sometimes it would return dictionaries and try to refresh episodes using :podcastid as the podcast id. Now it always refreshes correctly
- [x] Fixed issue where when using postgres the User Created date on the user stats page would display the unix Epoch date
- [x] Added Validations on Episode layout page to verify the user wants to delete the podcast or download all episodes
Pre launch tests:
Check routes for mysql and postgres
Create self service user on mysql and postgres
Version 0.6.2
- [x] Kubernetes deployment option with helm
- [x] Easy to use helm repo setup and active https://helm.pinepods.online
- [x] Added Local Download support to the client versions
- [x] Local Downloads and Server Downloads tabs in client versions
- [x] Created logic to keep track of locally downloaded episodes
- [x] Episodes download using tauri function
- [x] Episodes play using tauri functions
- [x] Episodes delete using tauri functions
- [x] Create a system to queue the local download jobs so that you don't need to wait for the downloads to complete
- [x] Added offline support to the client versions.
- [x] Installable PWA
- [x] Fixed bug where some requests would queue instead of clearing on continued episode plays. For example, if you played an episode and then played another episode, the first episode would still make requests for updating certain values.
- [x] Fixed issue with postgres dbs not adding episodes after adding a Nextcloud sync server (It was calling the refresh nextcloud function in the wrong file)
- [x] Fixed issue with manual completion where it only could complete, but not uncomplete
- [x] Fixed issue in downloads page where see more button didn't work on episodes
Version 0.6.1
- [x] Add support for gpodder sync standalone container. You can now sync to either Nextcloud or a gpodder standalone server that supports user and passwords.
- [x] Volume control in the player
- [x] Fixed a couple parsing issues with mysql dbs found after implementing the new postgres support
- [x] Fixed issue where MFA couldn't be disabled. It just tried to enable it again.
- [x] Fixed issue with time zone parsing in postgres and mysql dbs
- [x] Implemented a mac dmg client
- [x] Added Current Version to User Stats Page
Version 0.6.0
- [x] Added Postgresql support
- [x] Added option to podcast pages to allow for downloading every episode
- [x] Enhanced downloads page to better display podcasts. This improves archival experience
- [x] Added ability to download all episodes of a podcast at once with a button
- [x] Added Individual Podcast Settings Button
- [x] Completed status added so podcasts can be marked as completed manually and will auto complete once finished
- [x] Auto Download Episodes when released for given podcasts
- [x] Added Auto Skip options for intro and outros of podcasts
- [x] Fixed issue where episodes could be downloaded multiple times
Version 0.5.4
- [x] Fixed enter key to login when highlighted on username or password field of login page
- [x] Created a confirmation message when a user gets created using self service user creation
- [x] Fixed issue with viewing episodes with certain podcasts when any episodes were missing a duration
- [x] Fixed issue where release date would show current timestamp when the podcast wasn't added to the db
- [x] Added user deletion option when editing a user
- [x] Fixed issue with password changing in the ui. It now works great.
Version 0.5.3
- [x] Fix appearance and layout of podcasts on podcast screen or on searching pages. (Also added additional see more type dropdowns for descriptions to make them fit better.)
- [x] Fix mobile experience to make images consistently sized
- [x] Fixed layout of pinepods logo on user stats screen
- [x] Expanded the search bar on search podcasts page for small screens. It was being cut off a bit
- [x] Fixed order of history page
- [x] Downloads page typo
- [x] Improve look of search podcast dropdown on small screens
- [x] Made the setting accordion hover effect only over the arrows.
- [x] Added area in the settings to add custom podcast feeds
- [x] Added a Pinepods news feed that gets automatically subscribed to on fresh installs. You can easily unsubscribe from this if you don't care about it
- [x] Added ability to access episodes for an entire podcast from the episode display screen (click the podcast name)
- [x] Created functionality so the app can handle when a feed doesn't contain an audio file
- [x] Added playback speed button in the episode playing page. Now you can make playback faster!
- [x] Added episode skip button in the episode playing page. Skips to the next in the queue.
- [x] Fixed issue with the reverse button in the episode page so that it now reverses the playback by 15 seconds.
- [x] Fixed issue where spacebar didn't work in app when episode was playing
- [x] Added and verified support for mysql databases. Thanks @rgarcia6520
Version 0.5.2
- [x] Fixed issue with removal of podcasts when no longer in nextcloud subscription
- [x] Fixed scrolling problems where the app would sometimes start you at the bottom of the page when scrolling to different locations.
- [x] Fixed issue where a very occasional podcast is unable to open its feed. This was due to podcast redirects, which caused the function to not work. It will now follow a redirect.
- [x] Fixed an issue where podcasts would be removed after adding when nextcloud sync is active
- [x] Added Nextcloud timestamp functionality. Podcasts will now sync listen timestamps from nextcloud. Start an episode on pinepods and finish it on Antennapods!
- [x] Added css files for material icons rather than pulling them down from Google's servers (Thanks @civilblur)
- [x] Fixed display issue on the search bar so it correctly formats itunes and podcast index
- [x] Added in check on the podcast page to check if the podcast has been added. This allows the podcast to have the context button if it's added to the db
- [x] Readjusted the format of episodes on screen. This tightens them up and ensures they are all always consistently sized. It also allows more episodes to show at once.
- [x] Added loading icon when a podcast is being added. This gives some feedback to the user during a couple seconds it takes to add the feed. (Also improved the look of that button)
- [x] Fixed date formatting issue on all pages so they format using the user's timezone preferences.
- [x] Added notifications when saving, downloading, or queueing episode from search page.
- [x] Improved look at the episode page. Fixed up the spacing and the buttons.
Version 0.5.1
- [x] Fixed Nextcloud cors issues that were appearing due to requests being made from the client side
- [x] Fixed Docker auto uploads in actions CI/CD
Version 0.5.0
- [x] Complete Rust WASM Rebuild
- [x] Make Timestamps work with Auto Resume
- [x] Nextcloud Subscriptions
- [x] Finalize User Stats recording and display
- [x] MFA Logins
- [x] User Settings
- [x] Ensure Queue Functions after episode End
- [x] Auto Update Button interactions based on current page. (EX. When on saved page - Save button should be Remove from Saved rather than Save)
- [x] Refresh of podcasts needs to be async (Currently that process stops the server dead)
- [x] Make the Queue functional and verify auto removals and adds
- [x] Downloads Page
- [x] Backup Server
- [x] Allow for episodes to be played without being added
- [x] Fix images on some podcasts that don't appear. Likely a fallback issue
- [x] Issues occur server side when adding podcast without itunes_duration
(pinepods-1 | Error adding episodes: object has no attribute 'itunes_duration')
- [x] Click Episode Title to Open into Episode Screen
- [x] Duration Not showing when podcast played from episode layout screen
- [x] Episodes not appearing in history (Issue due to recent episode in db check)
- [x] Panic being caused when searching podcasts sometimes (due to an empty value) <- Silly Categories being empty
- [x] Auto close queue, download, save context menu when clicking an option or clicking away from it
- [x] Added login screen random image selection. For some nice styling
- [x] Check for Added Podcasts to ensure you can't add a second time. Searching a podcast already added should present with remove button instead of add < - On search results page (done), on podcasts page (done), and on podcast episode list page
- [x] Show Currently Connected Nextcloud Server in settings
- [x] Allow Setting and removing user admin status in settings
- [x] Show released time of episodes - use function call_get_time_info in pod_reqs (Additional date format display implemented along with AM/PM time based on user pref)
- [x] Require Re-Login if API Key that's saved doesn't work
- [x] Episodes directly get the wrong images sometimes. This likely has to do with the way the database is parsing the podcasts as they refresh and pull in. (Should be fixed. Need to allow feeds to load in some episodes to know for sure)
- [x] Episode Releases are showing now time. Rather than actual release in app (Bug with Parsing)
- [x] Consistent Styling Throughout
- [x] Setup All Themes
- [x] Downloads page playing streamed episodes. Should stream from server files
- [x] Loading icon in the center of screen while episodes load in (Done on home - Further test)
- [x] Podcasts show episode images sometimes on podcasts page for some reason (This was because it used the first episode in the feed for the import. Not anymore)
- [x] Initial Screen loading as we pull in context - It swaps a lot quicker now. Theme stores itself in local storage
- [x] Run Podcast Descriptions on Podcasts page through html parsing
- [x] Fix all auth Problems with redirecting and episodes loading (Solution Found, implementing on all routes) <- Fixed, F5 now returns you to the page you were previously on
- [x] Nextcloud Subscription Timestamps
- [x] Verify Users only see what they have access to
- [x] Do not delete theme context on logout
- [x] Make validations work correctly on login user create
- [x] Make no or wrong pass display error in server Restore and Backup
- [x] Improve Import Experience
- [x] Update All Depends
- [x] Loading animations where if makes sense
- [x] Verify Functional Mobile Version (Functional - Will be made better with time)
- [x] Cleanup prints on server and client end. Make debugging functionality work again
- [x] Fix all CORs Issues - Verify behind Reverse Proxy (Seems to all work great with no issues)
- [x] Client release with Tauri (Compiles and runs. Feature testing needed - Mainly Audio) <- Audio tested and working. Everything seems to be totally fine.
- [x] Automation - client auto release and compile - auto compile and push to docker hub
- [x] Revamp Readme
- [x] Cors errors when browsing certain podcast results
- [x] Perfect the scrubbing (Mostly good to go at this point. The only potential issue is the coloring. Another pass on colors will be done after the first beta release.)
- [x] Itunes
- [x] Revamp Documentation
Version 0.5.0
- [x] v0.1 of Pinepods Firewood released!
- [x] Nextcloud Gpodder Support added to Pinepods!
Version 0.4.1
- [x] Fixed issue where get_user_episode_count wasn't displaying episode numbers. There was a syntax error in the api call
- [x] Added /api/data/podcast_episodes and /api/data/get_podcast_id api calls. These are needed for Pinepods Firewood
Version 0.4
- [x] Unlock api creation for standard users - The API has been completely re-written to follow along the permissions that users actually have. Meaning users can easily request their own api keys and sign into the client with admin consent
- [x] Signing into the client edition is now possible with either an API key or username and password sign in. It gives the option to choose which you would prefer.
- [x] Email resets currently broken for non-admins due to lockdown on encryption key. Need to handle encryption server-side
- [x] Client version images load a lot faster now
- [x] Fixed issue with audio container not reappearing after entering playing fullscreen
- [x] Fixed Issue with Queue Bump Not working right
- [x] Added verification when deleting user
Version 0.3.1
- [x] Finalize reverse proxy processes and web playing
Version 0.3
- [x] Export and import of following podcasts (basically user backups)
- [x] Entire Server Backup and Import. This allows you to export and import your entire database for complete backups
- [x] New refresh system added to automatically update podcasts in database with no user input.
- [x] Reworked the controls displayed on the page to be components of a class. This should improve performance.
- [x] fixed issues with logging in on small screens. (a big step for mobile version)
- [x] Bug fixing such as fixing queue bump, and fixing an audio changing issue - Along with quite a few random UI bug fixing throughout
Version 0.2
- [x] Implement custom urls for feeds
- [x] Organize folder layout in the same way as the client when server downloading
Version 0.1
- [X] Create Code that can pull Podcasts
- [X] Integrate Podcast Index
- [X] Play Audio Files using Python - Flet's Audio library is used
- [X] Record listen history and display user history on specific page
- [X] Record accurate listen time. So if you stop listening part-way through you can resume from the same spot
- [X] Scrubbing playback from a progress bar - ft.slider()
- [X] Visual progress bar based on time listened to podcasts partly listened to
- [X] Download option for podcasts. In addition, display downloaded podcasts in downloads area. Allow for deletion of these after downloaded
- [X] Queue, and allow podcasts to be removed from queue once added (Queue is added but you can't remove them from it yet)
- [X] Login screen
- [X] Episode view (Also display html in descriptions via markdown)
- [X] Multiple Themes (like 10 I think?)
- [X] Add picture of current episode to soundbar
- [X] Complete user management with admin options
- [X] Ability to Delete Users
- [X] Allow guest user to be disabled (Is disabled by default)
- [X] Ensure changes cannot be made to guest user
- [X] Ensure Users cannot delete themselves
- [X] Guest sign in via button on login screen when enabled
- [X] Saved episodes view
- [X] Caching image server (proxy)
- [X] Optional user self service creation
- [X] User stats page
- [X] Implement sign in retention. (App retention now works. It creates session keys and stores them locally. Browser retention is next, this will need some kind of oauth)
- [X] Audio Volume adjustment options
- [X] Create Web App
- [X] Responsive layout
- [X] Security and Logins
- [X] Database interaction for users and podcast data
- [x] Fully update Readme with updated info and docs including deployment guide
- [X] Bugs
- [X] Links when searching an episode are blue (wrong color)
- [X] When changing theme, then selecting 'podcasts' page, the navbar does not retain theme
- [X] There's an issue with Queue not working properly. Sometimes it just plays instead of queues (Fixed when switching to flet audio control)
- [X] Clicking podcast that's already been added displays add podcast view with no current way to play
- [X] Clicking play buttons on a podcast while another is loading currently breaks things
- [X] Pausing audio changes font color
- [X] Login screen colors are wrong on first boot
- [X] Themeing currently wrong on audio interaction control
- [X] Starting a podcast results in audio bar being in phone mode on application version (This should be fixed. I load the check screensize method now further down the page. Which results in consistent width collection.)
- [X] Starting a podcast results in audio bar being in phone mode on application version
- [X] Adding a podcast with an emoji in the description currently appears to break it
- [X] Layout breaks when pausing for podcast names
- [X] The queue works but currently does not remove podcasts after switching to a new one
- [X] Resume is currently broken (it now works but it double plays an episode before resuming for some reason. It still double plays and there's not a great way to fix it. Return later. Updates to flet are likely to help eventually)
- [X] Double check 2 users adding the same podcast (There was an issue with checking playback status that is now fixed)
- [X] After refresh auto update current route
- [X] Double and triple check all interactions to verify functionality
- [X] Fix any additional browser playback bugs (Audio now routes properly through the proxy)
- [x] Dockerize
- [X] Package into Container/Dockerfile
- [X] Pypods image in docker hub
- [X] Create Docker-Compose Code
- [X] Mixed content - Currently running http or https content can cause an error
- [x] Option to run your own local podcast index api connection
- [x] Implement Gravatar API for profile picture
- [x] Make web version utilize API Routes instead of database connections directly
- [x] Update flet dependency to v6 (This fixes audio routing)
- [x] Ability to disable downloads (for public servers)
- [x] One set of functions. Currently the client and web app use different function sets. This has been changed for consistency.
- [x] GUI Wrapper for App
- [x] Server Hosting and client Interaction - Client interaction works via API with mariadb which is hosted on server side
- [x] Options to create API keys on the web client as well as ability to remove them
- [x] Linux App
- [x] Install Script
- [x] Packaging and automation
- [X] Proper web layout
- [x] Windows App
- [x] Packaging and automation
- [x] Mac App
- [x] Packaging and automation
- [x] Self Service PW Resets
- [x] Add creator info to bottom of stats page
- [x] Default User Creation (Default User is now created if user vars aren't specified in compose file)
- [x] Issue with web search bar may be due to appbar (This was a rabbit hole. Turns out this was due to the way the top bar was created prior to the routes. I needed to rebuild how searching is done, but this is now fixed)
- [x] Occasionally podcasts will put seconds value in mins (This was a bug due to duration parsing. Code fixed, everything now displays properly)
- [x] Fix client pooling issue (This is a tough issue. Pooling is occasionally a problem. I set the idle timeout to kill old connections and I also fixed a couple database connections that didn't run cnx.close) Edit: I actually think this is truly fixed now. I rebuilt the way this works using async, no problems so far
- [x] Rebuild image pulling process. The current one is just unworkable (It runs a lot better now. It spawns 4 workers to handle image gathering. Though it still isn't perfect, it hangs a bit occasionally, but for the time being it's totally usable)
- [x] Layout Settings page better
- [x] MFA Logins
- [x] Allow local downloads to just download the mp3 files direct (Likely only possible on app version)
- [x] Add Itunes podcast API
- [x] MFA Logins on web version
- [x] Do something when search results aren't found - Currently Blank screen
- [x] Implement smoother scrolling with big list loading (I've started a fix for this. ListViews are now active and working right on home and podview)
- [x] Option to remove from history
- [x] Reload not needed to add and remove episodes from pages
- [x] Add mfa to dynamic settings class
- [x] Add new users to dynamic settings class
- [x] Add Email settings to dynamic users class
- [x] logout on client remove saved app cache (Implemented button in settings to clear cache)
- [x] On top bar cutoff add a search button that opens a search prompt (There's a small version of the search button now)
- [x] custom timezone entry
- [x] MFA Display totp secret
- [x] Fix guest with timezone stuff
- [x] 2.0 description features
- [x] Mass downloading episodes. Entire podcast at once (Implemented but I'm working on getting it to display on download page to see status)
- [x] Remove local podcasts if podcast is no longer in database - Handle this somehow - Mass delete feature added
- [x] Speed up database queries (Indexing added to episodes and podcasts)
- [x] Check local downloads if already downloaded
- [x] Allow description view on podcasts not added
- [x] Configure some kind of auto-refresh feature - Refreshes now on first boot and once every hour
- [x] Mass download options not working on web
- [x] Issue with loading poddisplay on web
- [x] Search options missing from web (Restored - Entirely due to flet jank from app to web)
- [x] Small layout Improvements (Try, complete layout overhaul actually)
- [x] Apparently I broke itunes searching (description addition was causing a problem)
- [x] Internal Episode Search
- [x] Refresh causes episode to restart
- [x] Fix logout - It still shows the navbar
- [x] Refresh with nothing in database breaks things
- [x] Revamp queue - It should just save to the database
- [x] Refresh changes on readme
- [x] API documentation (Site Built with Docusaurus)

View File

@@ -1,137 +0,0 @@
#!/usr/bin/env python3
"""
Database Migration Runner for PinePods
This script can be run standalone to apply database migrations.
Useful for updating existing installations.
"""
import os
import sys
import logging
import argparse
from pathlib import Path
# Set up logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# Add pinepods to path
pinepods_path = Path(__file__).parent.parent
sys.path.insert(0, str(pinepods_path))
def run_migrations(target_version=None, validate_only=False):
    """Run (or validate) database migrations.

    Args:
        target_version: Optional migration version to stop at.
            BUG FIX: this was previously accepted but ignored (the code
            called run_all_migrations() unconditionally); it is now
            forwarded to the migration manager so `migrate --target`
            actually stops at that version.
        validate_only: When True, only verify checksums of already-applied
            migrations; no schema changes are made.

    Returns:
        bool: True on success, False on any failure.
    """
    try:
        # Imported lazily so a missing pinepods package produces a clean
        # logged error instead of an import-time crash.
        import database_functions.migration_definitions
        from database_functions.migrations import get_migration_manager

        # Register all migrations with the global manager.
        database_functions.migration_definitions.register_all_migrations()
        manager = get_migration_manager()

        if validate_only:
            logger.info("Validating existing migrations...")
            success = manager.validate_migrations()
            if success:
                logger.info("All migrations validated successfully")
            else:
                logger.error("Migration validation failed")
            return success

        # Show current state before applying anything.
        applied = manager.get_applied_migrations()
        logger.info(f"Currently applied migrations: {len(applied)}")
        for version in applied:
            logger.info(f" - {version}")

        # Run migrations, honoring the requested target version.
        logger.info("Starting migration process...")
        success = manager.run_migrations(target_version)
        if success:
            logger.info("All migrations completed successfully")
        else:
            logger.error("Migration process failed")
        return success
    except Exception as e:
        logger.error(f"Migration failed: {e}")
        return False
def list_migrations():
    """Log every registered migration with its APPLIED/PENDING state.

    Returns:
        bool: True when the listing succeeded, False on any error.
    """
    try:
        import database_functions.migration_definitions
        from database_functions.migrations import get_migration_manager

        # Ensure all migrations are registered before inspecting state.
        database_functions.migration_definitions.register_all_migrations()

        manager = get_migration_manager()
        applied_versions = set(manager.get_applied_migrations())

        logger.info("Available migrations:")
        for version, migration in sorted(manager.migrations.items()):
            state = "APPLIED" if version in applied_versions else "PENDING"
            logger.info(f" {version} - {migration.name} [{state}]")
            logger.info(f" {migration.description}")
            if migration.requires:
                logger.info(f" Requires: {', '.join(migration.requires)}")
        return True
    except Exception as e:
        logger.error(f"Failed to list migrations: {e}")
        return False
def main():
    """Command-line entry point for the migration tool.

    Parses the command (migrate/list/validate) and optional flags, runs
    the matching handler, and exits 0 on success, 1 on failure.
    """
    parser = argparse.ArgumentParser(description="PinePods Database Migration Tool")
    parser.add_argument(
        "command",
        choices=["migrate", "list", "validate"],
        help="Command to execute"
    )
    parser.add_argument(
        "--target",
        help="Target migration version (migrate only)"
    )
    parser.add_argument(
        "--verbose", "-v",
        action="store_true",
        help="Enable verbose logging"
    )
    args = parser.parse_args()

    if args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)

    # argparse already restricts command to the declared choices, so a
    # dispatch table is sufficient; the fallback mirrors the original code.
    handlers = {
        "migrate": lambda: run_migrations(args.target),
        "list": list_migrations,
        "validate": lambda: run_migrations(validate_only=True),
    }
    handler = handlers.get(args.command)
    if handler is None:
        logger.error(f"Unknown command: {args.command}")
        succeeded = False
    else:
        succeeded = handler()
    sys.exit(0 if succeeded else 1)
if __name__ == "__main__":
main()

File diff suppressed because it is too large Load Diff

View File

@@ -1,530 +0,0 @@
"""
Database Migration System for PinePods
This module provides a robust, idempotent migration framework that tracks
applied migrations and ensures database schema changes are applied safely.
"""
import hashlib
import logging
import os
import sys
from dataclasses import dataclass, field
from datetime import datetime
from typing import Dict, List, Optional, Callable, Any
# Add pinepods to path for imports
sys.path.append('/pinepods')
# Database imports
try:
import psycopg
POSTGRES_AVAILABLE = True
except ImportError:
POSTGRES_AVAILABLE = False
try:
import mariadb as mysql_connector
MYSQL_AVAILABLE = True
except ImportError:
try:
import mysql.connector
MYSQL_AVAILABLE = True
except ImportError:
MYSQL_AVAILABLE = False
logger = logging.getLogger(__name__)
@dataclass
class Migration:
    """Represents a single database migration.

    Attributes:
        version: Unique, sortable version identifier (e.g. "001").
        name: Short human-readable name.
        description: Longer description of what the migration does.
        postgres_sql: SQL to run on PostgreSQL, if any.
        mysql_sql: SQL to run on MySQL/MariaDB, if any.
        python_func: Optional callable(conn, db_type) executed as the migration.
        requires: Migration versions that must be applied before this one.
    """
    version: str
    name: str
    description: str
    postgres_sql: Optional[str] = None
    mysql_sql: Optional[str] = None
    python_func: Optional[Callable] = None
    # Idiomatic mutable default: each instance gets its own fresh list
    # instead of a None sentinel.
    requires: List[str] = field(default_factory=list)

    def __post_init__(self):
        # Backward compatibility: callers may still pass requires=None.
        if self.requires is None:
            self.requires = []
class DatabaseMigrationManager:
"""Manages database migrations with support for PostgreSQL and MySQL/MariaDB"""
def __init__(self, db_type: str, connection_params: Dict[str, Any]):
    """Initialize the manager.

    Args:
        db_type: One of postgresql/postgres/mariadb/mysql (case-insensitive).
        connection_params: Keyword arguments for the driver's connect().

    Raises:
        ValueError: If db_type is not a supported database type.
    """
    self.db_type = db_type.lower()
    self.connection_params = connection_params
    self.migrations: Dict[str, Migration] = {}
    self._connection = None

    # Collapse the accepted aliases down to two canonical values,
    # rejecting anything unrecognized.
    if self.db_type in ('postgres', 'postgresql'):
        self.db_type = 'postgresql'
    elif self.db_type in ('mysql', 'mariadb'):
        self.db_type = 'mysql'
    else:
        raise ValueError(f"Unsupported database type: {db_type}")
def get_connection(self):
    """Return a live DB connection, creating and caching it on first use.

    Returns:
        An open psycopg (PostgreSQL) or MariaDB/MySQL connection.

    Raises:
        ImportError: If the driver for the configured database type is
            not installed.
    """
    # Reuse the cached connection if one is already open.
    if self._connection:
        return self._connection
    if self.db_type == 'postgresql':
        if not POSTGRES_AVAILABLE:
            raise ImportError("psycopg not available for PostgreSQL connections")
        self._connection = psycopg.connect(**self.connection_params)
    elif self.db_type == 'mysql':
        if not MYSQL_AVAILABLE:
            raise ImportError("MariaDB/MySQL connector not available for MySQL connections")
        # Use MariaDB connector parameters
        mysql_params = self.connection_params.copy()
        # Convert mysql.connector parameter names to mariadb parameter names
        if 'connection_timeout' in mysql_params:
            mysql_params['connect_timeout'] = mysql_params.pop('connection_timeout')
        if 'charset' in mysql_params:
            mysql_params.pop('charset')  # MariaDB connector doesn't use charset parameter
        if 'collation' in mysql_params:
            mysql_params.pop('collation')  # MariaDB connector doesn't use collation parameter
        self._connection = mysql_connector.connect(**mysql_params)
    return self._connection
def close_connection(self):
"""Close database connection"""
if self._connection:
self._connection.close()
self._connection = None
def register_migration(self, migration: Migration):
    """Add a migration to the set tracked by this manager.

    A migration registered under an existing version replaces it.
    """
    self.migrations[migration.version] = migration
    logger.info(f"Registered migration {migration.version}: {migration.name}")
def create_migration_table(self):
    """Create the migrations tracking table if it doesn't exist.

    One row per applied migration: version, name, description, content
    checksum, applied timestamp, and execution time in milliseconds.

    Raises:
        Exception: Re-raised after rollback if table creation fails.
    """
    conn = self.get_connection()
    cursor = conn.cursor()
    try:
        if self.db_type == 'postgresql':
            # Quoted identifier keeps the exact table name on PostgreSQL.
            cursor.execute("""
                CREATE TABLE IF NOT EXISTS "schema_migrations" (
                    version VARCHAR(255) PRIMARY KEY,
                    name VARCHAR(255) NOT NULL,
                    description TEXT,
                    checksum VARCHAR(64) NOT NULL,
                    applied_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    execution_time_ms INTEGER
                )
            """)
        else:  # mysql
            cursor.execute("""
                CREATE TABLE IF NOT EXISTS schema_migrations (
                    version VARCHAR(255) PRIMARY KEY,
                    name VARCHAR(255) NOT NULL,
                    description TEXT,
                    checksum VARCHAR(64) NOT NULL,
                    applied_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    execution_time_ms INTEGER
                )
            """)
        conn.commit()
        logger.info("Migration tracking table created/verified")
    except Exception as e:
        logger.error(f"Failed to create migration table: {e}")
        conn.rollback()
        raise
    finally:
        cursor.close()
def get_applied_migrations(self) -> List[str]:
    """Get list of applied migration versions.

    Returns:
        Version strings ordered by application time; an empty list if
        the tracking table does not exist yet (treated as "none applied").
    """
    conn = self.get_connection()
    cursor = conn.cursor()
    try:
        # PostgreSQL needs the quoted identifier; MySQL must not have it.
        table_name = '"schema_migrations"' if self.db_type == 'postgresql' else 'schema_migrations'
        cursor.execute(f"SELECT version FROM {table_name} ORDER BY applied_at")
        return [row[0] for row in cursor.fetchall()]
    except Exception as e:
        # If table doesn't exist, return empty list
        logger.warning(f"Could not get applied migrations: {e}")
        return []
    finally:
        cursor.close()
def calculate_migration_checksum(self, migration: Migration) -> str:
    """Return a SHA-256 hex digest of the migration's effective content.

    Hashes only the SQL relevant to the active database type, plus the
    compiled bytecode of python_func (if any), so the checksum changes
    whenever the migration's executable content changes.
    """
    parts = []
    if migration.postgres_sql and self.db_type == 'postgresql':
        parts.append(migration.postgres_sql)
    elif migration.mysql_sql and self.db_type == 'mysql':
        parts.append(migration.mysql_sql)
    if migration.python_func:
        parts.append(migration.python_func.__code__.co_code.hex())
    return hashlib.sha256("".join(parts).encode()).hexdigest()
def record_migration(self, migration: Migration, execution_time_ms: int):
    """Record a migration as applied in the tracking table.

    The insert is idempotent: an already-recorded version is skipped
    (ON CONFLICT DO NOTHING on PostgreSQL, INSERT IGNORE on MySQL).

    Args:
        migration: The migration that was applied.
        execution_time_ms: How long it took, in milliseconds.

    Raises:
        Exception: Re-raised after rollback if the insert fails.
    """
    conn = self.get_connection()
    cursor = conn.cursor()
    try:
        checksum = self.calculate_migration_checksum(migration)
        table_name = '"schema_migrations"' if self.db_type == 'postgresql' else 'schema_migrations'
        if self.db_type == 'postgresql':
            cursor.execute(f"""
                INSERT INTO {table_name} (version, name, description, checksum, execution_time_ms)
                VALUES (%s, %s, %s, %s, %s)
                ON CONFLICT (version) DO NOTHING
            """, (migration.version, migration.name, migration.description, checksum, execution_time_ms))
        else:  # mysql
            cursor.execute(f"""
                INSERT IGNORE INTO {table_name} (version, name, description, checksum, execution_time_ms)
                VALUES (%s, %s, %s, %s, %s)
            """, (migration.version, migration.name, migration.description, checksum, execution_time_ms))
        conn.commit()
        logger.info(f"Recorded migration {migration.version} as applied")
    except Exception as e:
        logger.error(f"Failed to record migration {migration.version}: {e}")
        conn.rollback()
        raise
    finally:
        cursor.close()
def check_dependencies(self, migration: Migration, applied_migrations: List[str]) -> bool:
    """Return True when every migration this one requires is applied.

    Logs and returns False at the first missing dependency.
    """
    for required_version in migration.requires:
        if required_version in applied_migrations:
            continue
        logger.error(f"Migration {migration.version} requires {required_version} but it's not applied")
        return False
    return True
def execute_migration(self, migration: Migration) -> bool:
    """Execute a single migration.

    Runs the SQL matching the active database type (if any), then the
    migration's python_func (the main path for these migrations),
    committing each step, and finally records the migration with its
    measured execution time.

    Returns:
        bool: True when applied and recorded; False on any failure.
    """
    start_time = datetime.now()
    conn = self.get_connection()
    try:
        # Choose appropriate SQL based on database type
        sql = None
        if self.db_type == 'postgresql' and migration.postgres_sql:
            sql = migration.postgres_sql
        elif self.db_type == 'mysql' and migration.mysql_sql:
            sql = migration.mysql_sql
        # Execute SQL if available
        if sql:
            cursor = conn.cursor()
            try:
                # Naive split on ';' is fine for these migrations but would
                # break on semicolons inside string literals.
                statements = [stmt.strip() for stmt in sql.split(';') if stmt.strip()]
                for statement in statements:
                    cursor.execute(statement)
                conn.commit()
                logger.info(f"Executed SQL for migration {migration.version}")
            except Exception:
                conn.rollback()
                raise
            finally:
                cursor.close()
        # Execute Python function if available (this is the main path for our migrations)
        if migration.python_func:
            try:
                migration.python_func(conn, self.db_type)
                conn.commit()
                logger.info(f"Executed Python function for migration {migration.version}")
            except Exception:
                conn.rollback()
                raise
        # Record successful migration
        execution_time = int((datetime.now() - start_time).total_seconds() * 1000)
        self.record_migration(migration, execution_time)
        logger.info(f"Successfully applied migration {migration.version}: {migration.name}")
        return True
    except Exception as e:
        logger.error(f"Failed to execute migration {migration.version}: {e}")
        # BUG FIX: was a bare `except:` which also swallows SystemExit and
        # KeyboardInterrupt; rollback remains best-effort.
        try:
            conn.rollback()
        except Exception:
            pass  # Connection might already be closed
        return False
def detect_existing_schema(self) -> List[str]:
    """Detect which migrations have already been applied based on existing schema.

    Used on a database that predates the migration framework: infers the
    applied migrations from which tables already exist so they can be
    recorded without being re-executed.

    Returns:
        List of migration version strings detected as already applied;
        empty list on any detection error (safe fallback: nothing assumed).
    """
    conn = self.get_connection()
    cursor = conn.cursor()
    applied = []
    try:
        # Check for tables that indicate migrations have been applied.
        # Names are stored with PostgreSQL-style quoting; information_schema
        # stores the bare name on BOTH backends, so strip the quotes before
        # comparing.
        checks = {
            "001": ['"Users"', '"OIDCProviders"', '"APIKeys"', '"RssKeys"'],
            "002": ['"AppSettings"', '"EmailSettings"'],
            "003": ['"UserStats"', '"UserSettings"'],
            "005": ['"Podcasts"', '"Episodes"', '"YouTubeVideos"'],
            "006": ['"UserEpisodeHistory"', '"UserVideoHistory"'],
            "007": ['"EpisodeQueue"', '"SavedEpisodes"', '"DownloadedEpisodes"']
        }
        for version, tables in checks.items():
            all_exist = True
            for table in tables:
                if self.db_type == 'postgresql':
                    cursor.execute("""
                        SELECT EXISTS (
                            SELECT FROM information_schema.tables
                            WHERE table_schema = 'public' AND table_name = %s
                        )
                    """, (table.strip('"'),))
                else:  # mysql
                    # BUG FIX: previously passed the quoted name (e.g. '"Users"'),
                    # which never matches information_schema on MySQL/MariaDB,
                    # so these migrations were never detected there.
                    cursor.execute("""
                        SELECT COUNT(*)
                        FROM information_schema.tables
                        WHERE table_schema = DATABASE() AND table_name = %s
                    """, (table.strip('"'),))
                exists = cursor.fetchone()[0]
                if not exists:
                    all_exist = False
                    break
            if all_exist:
                applied.append(version)
                logger.info(f"Detected existing schema for migration {version}")
        # Migration 004 is harder to detect, assume it's applied if 001-003 are
        if "001" in applied and "003" in applied and "004" not in applied:
            # Check if background_tasks user exists
            table_name = '"Users"' if self.db_type == 'postgresql' else 'Users'
            cursor.execute(f"SELECT COUNT(*) FROM {table_name} WHERE Username = %s", ('background_tasks',))
            if cursor.fetchone()[0] > 0:
                applied.append("004")
                logger.info("Detected existing schema for migration 004")
        # Check for gpodder tables - if ANY exist, ALL gpodder migrations are applied
        # (since they were created by the Go gpodder-api service and haven't changed)
        gpodder_indicator_tables = ['"GpodderSyncMigrations"', '"GpodderSyncDeviceState"',
                                    '"GpodderSyncSubscriptions"', '"GpodderSyncSettings"',
                                    '"GpodderSessions"', '"GpodderSyncState"']
        gpodder_migration_versions = ["100", "101", "102", "103", "104"]
        gpodder_tables_exist = False
        for table in gpodder_indicator_tables:
            table_name = table.strip('"')
            if self.db_type == 'postgresql':
                cursor.execute("""
                    SELECT EXISTS (
                        SELECT FROM information_schema.tables
                        WHERE table_schema = 'public' AND table_name = %s
                    )
                """, (table_name,))
            else:  # mysql
                cursor.execute("""
                    SELECT COUNT(*)
                    FROM information_schema.tables
                    WHERE table_schema = DATABASE() AND table_name = %s
                """, (table_name,))
            if cursor.fetchone()[0]:
                gpodder_tables_exist = True
                break
        if gpodder_tables_exist:
            for version in gpodder_migration_versions:
                if version not in applied:
                    applied.append(version)
                    logger.info(f"Detected existing gpodder tables, marking migration {version} as applied")
        # Check for PeopleEpisodes_backup table separately (migration 104)
        backup_table = "PeopleEpisodes_backup"
        if self.db_type == 'postgresql':
            cursor.execute("""
                SELECT EXISTS (
                    SELECT FROM information_schema.tables
                    WHERE table_schema = 'public' AND table_name = %s
                )
            """, (backup_table,))
        else:  # mysql
            cursor.execute("""
                SELECT COUNT(*)
                FROM information_schema.tables
                WHERE table_schema = DATABASE() AND table_name = %s
            """, (backup_table,))
        if cursor.fetchone()[0] and "104" not in applied:
            applied.append("104")
            logger.info("Detected existing PeopleEpisodes_backup table, marking migration 104 as applied")
        return applied
    except Exception as e:
        logger.warning(f"Error detecting existing schema: {e}")
        return []
    finally:
        cursor.close()
def run_migrations(self, target_version: Optional[str] = None) -> bool:
    """Run all pending migrations up to target version.

    On a database with no migration records, first infers already-present
    schema (detect_existing_schema) and records those versions without
    re-executing them. Remaining migrations then run in sorted version
    order with dependency checks; the connection is closed on exit.

    Args:
        target_version: Skip pending migrations with a version greater
            than this (string comparison); None runs everything.

    Returns:
        bool: True when every pending migration applied successfully.
    """
    try:
        # Create migration table
        self.create_migration_table()
        # Get applied migrations
        applied_migrations = self.get_applied_migrations()
        logger.info(f"Found {len(applied_migrations)} applied migrations")
        # If no migrations are recorded but we have existing schema, detect what's there
        if not applied_migrations:
            detected_migrations = self.detect_existing_schema()
            if detected_migrations:
                logger.info(f"Detected existing schema, marking {len(detected_migrations)} migrations as applied")
                # Record detected migrations without executing them
                for version in detected_migrations:
                    if version in self.migrations:
                        migration = self.migrations[version]
                        self.record_migration(migration, 0)  # 0ms execution time for pre-existing
                # Refresh applied migrations list
                applied_migrations = self.get_applied_migrations()
        # Sort migrations by version
        pending_migrations = []
        for version, migration in sorted(self.migrations.items()):
            if version not in applied_migrations:
                if target_version and version > target_version:
                    continue
                pending_migrations.append(migration)
        if not pending_migrations:
            logger.info("No pending migrations to apply")
            return True
        logger.info(f"Found {len(pending_migrations)} pending migrations")
        # Execute pending migrations
        for migration in pending_migrations:
            # Check dependencies
            if not self.check_dependencies(migration, applied_migrations):
                logger.error(f"Dependency check failed for migration {migration.version}")
                return False
            # Execute migration
            if not self.execute_migration(migration):
                logger.error(f"Failed to execute migration {migration.version}")
                return False
            # Add to applied list for dependency checking
            applied_migrations.append(migration.version)
        logger.info("All migrations applied successfully")
        return True
    except Exception as e:
        logger.error(f"Migration run failed: {e}")
        return False
    finally:
        self.close_connection()
def validate_migrations(self) -> bool:
    """Validate that applied migrations haven't changed.

    Compares the checksum recorded when each migration was applied
    against the checksum of the currently registered migration with the
    same version.

    Returns:
        bool: True when every recorded checksum matches.
    """
    # BUG FIX: cursor was unbound if get_connection() raised, so the
    # finally block itself raised UnboundLocalError, masking the error.
    cursor = None
    try:
        conn = self.get_connection()
        cursor = conn.cursor()
        table_name = '"schema_migrations"' if self.db_type == 'postgresql' else 'schema_migrations'
        cursor.execute(f"SELECT version, checksum FROM {table_name}")
        applied_checksums = dict(cursor.fetchall())
        validation_errors = []
        for version, stored_checksum in applied_checksums.items():
            if version in self.migrations:
                current_checksum = self.calculate_migration_checksum(self.migrations[version])
                if current_checksum != stored_checksum:
                    validation_errors.append(f"Migration {version} checksum mismatch")
        if validation_errors:
            for error in validation_errors:
                logger.error(error)
            return False
        logger.info("All migration checksums validated successfully")
        return True
    except Exception as e:
        logger.error(f"Migration validation failed: {e}")
        return False
    finally:
        if cursor is not None:
            cursor.close()
# Migration manager instance (singleton pattern)
_migration_manager: Optional[DatabaseMigrationManager] = None
def get_migration_manager() -> DatabaseMigrationManager:
    """Get the global migration manager instance.

    Lazily builds the singleton from DB_* environment variables; the
    defaults target a local PostgreSQL instance.
    """
    global _migration_manager
    if _migration_manager is None:
        # Get database configuration from environment
        db_type = os.environ.get("DB_TYPE", "postgresql")
        if db_type.lower() in ['postgresql', 'postgres']:
            connection_params = {
                'host': os.environ.get("DB_HOST", "127.0.0.1"),
                'port': int(os.environ.get("DB_PORT", "5432")),
                'user': os.environ.get("DB_USER", "postgres"),
                'password': os.environ.get("DB_PASSWORD", "password"),
                'dbname': os.environ.get("DB_NAME", "pinepods_database")
            }
        else:  # mysql/mariadb
            connection_params = {
                'host': os.environ.get("DB_HOST", "127.0.0.1"),
                'port': int(os.environ.get("DB_PORT", "3306")),
                'user': os.environ.get("DB_USER", "root"),
                'password': os.environ.get("DB_PASSWORD", "password"),
                'database': os.environ.get("DB_NAME", "pinepods_database"),
                # charset/collation are stripped again in get_connection()
                # for the MariaDB connector; harmless for mysql.connector.
                'charset': 'utf8mb4',
                'collation': 'utf8mb4_general_ci'
            }
        _migration_manager = DatabaseMigrationManager(db_type, connection_params)
    return _migration_manager
def register_migration(version: str, name: str, description: str, **kwargs):
    """Decorator factory that registers the wrapped function as a migration.

    The decorated function becomes the migration's python_func and is
    returned unchanged so it can still be called directly.
    """
    def decorator(func):
        get_migration_manager().register_migration(
            Migration(
                version=version,
                name=name,
                description=description,
                python_func=func,
                **kwargs,
            )
        )
        return func
    return decorator
def run_all_migrations() -> bool:
    """Run every registered migration via the global manager."""
    return get_migration_manager().run_migrations()

View File

@@ -1,795 +0,0 @@
# tasks.py - Define Celery tasks with Valkey as broker
from celery import Celery
import time
import os
import asyncio
import datetime
import requests
from threading import Thread
import json
import sys
import logging
from typing import Dict, Any, Optional, List
# Make sure pinepods is in the Python path
sys.path.append('/pinepods')
database_type = str(os.getenv('DB_TYPE', 'mariadb'))
class Web_Key:
    """Caches the server's web API key, fetched lazily from the database."""

    def __init__(self):
        # No key until get_web_key() is called with a live connection.
        self.web_key = None

    def get_web_key(self, cnx):
        """Fetch, cache, and return the web key using connection `cnx`."""
        # Import only when needed to avoid circular imports
        from database_functions.functions import get_web_key as get_key
        key = get_key(cnx, database_type)
        self.web_key = key
        return key
base_webkey = Web_Key()
# Set up logging
logger = logging.getLogger("celery_tasks")
# Import the WebSocket manager directly from clientapi
try:
from clients.clientapi import manager as websocket_manager
print("Successfully imported WebSocket manager from clientapi")
except ImportError as e:
logger.error(f"Failed to import WebSocket manager: {e}")
websocket_manager = None
# Create a dedicated event loop thread for async operations
_event_loop = None
_event_loop_thread = None
def start_background_loop():
    """Start (once) a daemon thread running a dedicated asyncio event loop.

    The loop is published through the module-level _event_loop so other
    code can schedule coroutines on it (e.g. WebSocket broadcasts).
    Idempotent: a second call is a no-op once the loop exists.
    """
    global _event_loop, _event_loop_thread
    # Only start if not already running
    if _event_loop is not None:
        return

    # Function to run event loop in background thread
    def run_event_loop():
        global _event_loop
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        _event_loop = loop
        loop.run_forever()

    # Start the background thread
    _event_loop_thread = Thread(target=run_event_loop, daemon=True)
    _event_loop_thread.start()

    # BUG FIX: a fixed 0.1s sleep raced thread startup; poll (bounded)
    # until the thread has actually published the loop.
    deadline = time.time() + 5.0
    while _event_loop is None and time.time() < deadline:
        time.sleep(0.01)
    print("Started background event loop for WebSocket broadcasts")
# Start the event loop when this module is imported
start_background_loop()
# Use the existing Valkey connection for Celery
valkey_host = os.environ.get("VALKEY_HOST", "localhost")
valkey_port = os.environ.get("VALKEY_PORT", "6379")
broker_url = f"redis://{valkey_host}:{valkey_port}/0"
backend_url = f"redis://{valkey_host}:{valkey_port}/0"
# Initialize Celery with Valkey as broker and result backend
celery_app = Celery('pinepods',
broker=broker_url,
backend=backend_url)
# Configure Celery for best performance with downloads
celery_app.conf.update(
worker_concurrency=3, # Limit to 3 concurrent downloads per worker
task_acks_late=True, # Only acknowledge tasks after they're done
task_time_limit=1800, # 30 minutes time limit
task_soft_time_limit=1500, # 25 minutes soft time limit
worker_prefetch_multiplier=1, # Don't prefetch more tasks than workers
)
# Task status tracking in Valkey for all types of tasks
class TaskManager:
def __init__(self):
    """Bind the shared Valkey client used to persist task state."""
    # Imported here (not at module top) — presumably to avoid an import
    # cycle at module load time; TODO confirm.
    from database_functions.valkey_client import valkey_client
    self.valkey_client = valkey_client
def register_task(self, task_id: str, task_type: str, user_id: int, item_id: Optional[int] = None,
                  details: Optional[Dict[str, Any]] = None):
    """Register any Celery task for tracking.

    Stores the initial task record in Valkey under "task:<task_id>" with
    a 24-hour TTL, links it into the user's active-task list, and
    broadcasts the new state over WebSocket (best effort).
    """
    task_data = {
        "task_id": task_id,
        "user_id": user_id,
        "type": task_type,
        "item_id": item_id,
        "progress": 0.0,
        "status": "PENDING",
        "details": details or {},
        "started_at": datetime.datetime.now().isoformat()
    }
    self.valkey_client.set(f"task:{task_id}", json.dumps(task_data))
    # Set TTL for 24 hours
    self.valkey_client.expire(f"task:{task_id}", 86400)
    # Add to user's active tasks list
    self._add_to_user_tasks(user_id, task_id)
    # Try to broadcast the update if the WebSocket module is available
    try:
        self._broadcast_update(task_id)
    except Exception as e:
        logger.error(f"Error broadcasting task update: {e}")
def update_task(self, task_id: str, progress: float = None, status: str = None,
details: Dict[str, Any] = None):
"""Update any task's status and progress"""
task_json = self.valkey_client.get(f"task:{task_id}")
if task_json:
task = json.loads(task_json)
if progress is not None:
task["progress"] = progress
if status:
task["status"] = status
if details:
if "details" not in task:
task["details"] = {}
task["details"].update(details)
self.valkey_client.set(f"task:{task_id}", json.dumps(task))
# Try to broadcast the update
try:
self._broadcast_update(task_id)
except Exception as e:
logger.error(f"Error broadcasting task update: {e}")
def complete_task(self, task_id: str, success: bool = True, result: Any = None):
"""Mark any task as complete or failed"""
task_json = self.valkey_client.get(f"task:{task_id}")
if task_json:
task = json.loads(task_json)
task["progress"] = 100.0 if success else 0.0
task["status"] = "SUCCESS" if success else "FAILED"
task["completed_at"] = datetime.datetime.now().isoformat()
if result is not None:
task["result"] = result
self.valkey_client.set(f"task:{task_id}", json.dumps(task))
# Try to broadcast the final update
try:
self._broadcast_update(task_id)
except Exception as e:
logger.error(f"Error broadcasting task update: {e}")
# Keep completed tasks for 1 hour before expiring
self.valkey_client.expire(f"task:{task_id}", 3600)
# Remove from user's active tasks list after completion
if success:
self._remove_from_user_tasks(task.get("user_id"), task_id)
def get_task(self, task_id: str) -> Dict[str, Any]:
"""Get any task's details"""
task_json = self.valkey_client.get(f"task:{task_id}")
if task_json:
return json.loads(task_json)
return {}
def get_user_tasks(self, user_id: int) -> List[Dict[str, Any]]:
"""Get all active tasks for a user (all types)"""
tasks_list_json = self.valkey_client.get(f"user_tasks:{user_id}")
result = []
if tasks_list_json:
task_ids = json.loads(tasks_list_json)
for task_id in task_ids:
task_info = self.get_task(task_id)
if task_info:
result.append(task_info)
return result
def _add_to_user_tasks(self, user_id: int, task_id: str):
"""Add a task to the user's active tasks list"""
tasks_list_json = self.valkey_client.get(f"user_tasks:{user_id}")
if tasks_list_json:
tasks_list = json.loads(tasks_list_json)
if task_id not in tasks_list:
tasks_list.append(task_id)
else:
tasks_list = [task_id]
self.valkey_client.set(f"user_tasks:{user_id}", json.dumps(tasks_list))
# Set TTL for 7 days
self.valkey_client.expire(f"user_tasks:{user_id}", 604800)
def _remove_from_user_tasks(self, user_id: int, task_id: str):
"""Remove a task from the user's active tasks list"""
tasks_list_json = self.valkey_client.get(f"user_tasks:{user_id}")
if tasks_list_json:
tasks_list = json.loads(tasks_list_json)
if task_id in tasks_list:
tasks_list.remove(task_id)
self.valkey_client.set(f"user_tasks:{user_id}", json.dumps(tasks_list))
# Modified _broadcast_update method to avoid circular imports
def _broadcast_update(self, task_id: str):
"""Broadcast task update via HTTP endpoint"""
# Get task info
task_info = self.get_task(task_id)
if not task_info or "user_id" not in task_info:
return
user_id = task_info["user_id"]
cnx = None
try:
cnx = get_direct_db_connection()
# Import broadcaster - delay import to avoid circular dependency
sys.path.insert(0, '/pinepods/database_functions')
try:
from websocket_broadcaster import broadcaster
except ImportError:
try:
from database_functions.websocket_broadcaster import broadcaster
except ImportError as e:
print(f"Cannot import broadcaster from any location: {e}")
return
# Get web key
web_key = None
try:
# Get web key using class method to avoid direct import
if not base_webkey.web_key:
base_webkey.get_web_key(cnx)
web_key = base_webkey.web_key
except Exception as e:
print(f"Error getting web key: {str(e)}")
# Fallback to a direct approach if needed
try:
from database_functions.functions import get_web_key
web_key = get_web_key(cnx, database_type)
except Exception as e2:
print(f"Fallback web key retrieval failed: {str(e2)}")
return
# Progress and status details for better debugging
progress = task_info.get("progress", 0)
status = task_info.get("status", "unknown")
print(f"Broadcasting task update for user {user_id}, task {task_id}, progress: {progress}, status: {status}")
# Broadcast the update
result = broadcaster.broadcast_task_update(user_id, task_info, web_key)
if result:
print(f"Successfully broadcast task update for task {task_id}, progress: {progress}%")
else:
print(f"Failed to broadcast task update for task {task_id}, progress: {progress}%")
except Exception as e:
print(f"Error in task broadcast setup: {str(e)}")
finally:
if cnx:
# Close direct connection
close_direct_db_connection(cnx)
# Initialize a general task manager (module-level singleton used by all tasks)
task_manager = TaskManager()
# For backwards compatibility, keep the download_manager name too
download_manager = task_manager
# Module-level convenience wrapper over the TaskManager singleton.
def get_all_active_tasks(user_id: int) -> List[Dict[str, Any]]:
    """Return every in-flight task (downloads, refreshes, etc.) for a user."""
    active_tasks = task_manager.get_user_tasks(user_id)
    return active_tasks
# ----------------------
# IMPROVED CONNECTION HANDLING
# ----------------------
def get_direct_db_connection():
    """
    Create a direct database connection instead of using the pool.
    This is more reliable for Celery workers to avoid pool exhaustion.

    Connection parameters come from the DB_* environment variables; the
    module-level ``database_type`` selects PostgreSQL vs MariaDB/MySQL.

    Returns:
        A DB-API connection (psycopg, mariadb, or mysql.connector).
    """
    db_host = os.environ.get("DB_HOST", "127.0.0.1")
    db_port = os.environ.get("DB_PORT", "3306")
    db_user = os.environ.get("DB_USER", "root")
    db_password = os.environ.get("DB_PASSWORD", "password")
    db_name = os.environ.get("DB_NAME", "pypods_database")
    print(f"Creating direct database connection for task")
    if database_type == "postgresql":
        import psycopg
        conninfo = f"host={db_host} port={db_port} user={db_user} password={db_password} dbname={db_name}"
        return psycopg.connect(conninfo)
    else:  # Default to MariaDB/MySQL
        try:
            import mariadb as mysql_connector
        except ImportError:
            # BUG FIX: a bare `import mysql.connector` binds the name
            # `mysql`, not `mysql_connector`, so the connect() call below
            # raised NameError on systems without the mariadb driver.
            import mysql.connector as mysql_connector
        return mysql_connector.connect(
            host=db_host,
            # The mariadb driver requires an int port; mysql.connector
            # accepts either, so the cast is safe for both drivers.
            port=int(db_port),
            user=db_user,
            password=db_password,
            database=db_name
        )
def close_direct_db_connection(cnx):
    """Close a direct database connection, swallowing close-time errors."""
    if not cnx:
        return
    try:
        cnx.close()
        print("Direct database connection closed")
    except Exception as err:
        print(f"Error closing direct connection: {str(err)}")
# Minimal changes to download_podcast_task that should work right away
@celery_app.task(bind=True, max_retries=3)
def download_podcast_task(self, episode_id: int, user_id: int, database_type: str):
    """
    Celery task to download a podcast episode.
    Uses retries with exponential backoff for handling transient failures.

    Args:
        episode_id: Episodes row id of the episode to fetch.
        user_id: id of the requesting user.
        database_type: "postgresql" or anything else for MariaDB/MySQL.

    Returns:
        True/False from download_podcast on completion; retries on error.
    """
    task_id = self.request.id
    print(f"DOWNLOAD TASK STARTED: ID={task_id}, Episode={episode_id}, User={user_id}")
    cnx = None
    try:
        # Get a direct connection to fetch the title first
        cnx = get_direct_db_connection()
        cursor = cnx.cursor()
        # Get the episode title and podcast name
        if database_type == "postgresql":
            # First try to get both the episode title and podcast name
            query = '''
                SELECT e."episodetitle", p."podcastname"
                FROM "Episodes" e
                JOIN "Podcasts" p ON e."podcastid" = p."podcastid"
                WHERE e."episodeid" = %s
            '''
        else:
            query = '''
                SELECT e.EpisodeTitle, p.PodcastName
                FROM Episodes e
                JOIN Podcasts p ON e.PodcastID = p.PodcastID
                WHERE e.EpisodeID = %s
            '''
        cursor.execute(query, (episode_id,))
        result = cursor.fetchone()
        cursor.close()
        # Extract episode title and podcast name; the row may come back as
        # a dict or a tuple depending on the driver/cursor configuration.
        episode_title = None
        podcast_name = None
        if result:
            if isinstance(result, dict):
                # Dictionary result
                if "episodetitle" in result:  # PostgreSQL lowercase
                    episode_title = result["episodetitle"]
                    podcast_name = result.get("podcastname")
                else:  # MariaDB uppercase
                    episode_title = result["EpisodeTitle"]
                    podcast_name = result.get("PodcastName")
            else:
                # Tuple result
                episode_title = result[0] if len(result) > 0 else None
                podcast_name = result[1] if len(result) > 1 else None
        # Format a nice display title, falling back progressively
        display_title = "Unknown Episode"
        if episode_title and episode_title != "None" and episode_title.strip():
            display_title = episode_title
        elif podcast_name:
            display_title = f"{podcast_name} - Episode"
        else:
            display_title = f"Episode #{episode_id}"
        print(f"Using display title for episode {episode_id}: {display_title}")
        # Register task with more details
        task_manager.register_task(
            task_id=task_id,
            task_type="podcast_download",
            user_id=user_id,
            item_id=episode_id,
            details={
                "episode_id": episode_id,
                "episode_title": display_title,
                "status_text": f"Preparing to download {display_title}"
            }
        )
        # Define a progress callback with the display title
        def progress_callback(progress, status=None):
            # Translate the downloader's status codes into user-facing text.
            status_message = ""
            if status == "DOWNLOADING":
                status_message = f"Downloading {display_title}"
            elif status == "PROCESSING":
                status_message = f"Processing {display_title}"
            elif status == "FINALIZING":
                status_message = f"Finalizing {display_title}"
            task_manager.update_task(task_id, progress, status, {
                "episode_id": episode_id,
                "episode_title": display_title,
                "status_text": status_message
            })
        # Close the connection used for title lookup
        close_direct_db_connection(cnx)
        # Get a fresh connection for the download
        cnx = get_direct_db_connection()
        # Import the download function (deferred to avoid import cycles)
        from database_functions.functions import download_podcast
        print(f"Starting download for episode: {episode_id} ({display_title}), user: {user_id}, task: {task_id}")
        # Execute the download with progress reporting
        success = download_podcast(
            cnx,
            database_type,
            episode_id,
            user_id,
            task_id,
            progress_callback=progress_callback
        )
        # Mark task as complete with a nice message
        completion_message = f"{'Successfully downloaded' if success else 'Failed to download'} {display_title}"
        task_manager.complete_task(
            task_id,
            success,
            {
                "episode_id": episode_id,
                "episode_title": display_title,
                "status_text": completion_message
            }
        )
        return success
    except Exception as exc:
        print(f"Error downloading podcast {episode_id}: {str(exc)}")
        # Mark task as failed
        task_manager.complete_task(
            task_id,
            False,
            {
                "episode_id": episode_id,
                "episode_title": f"Episode #{episode_id}",
                "status_text": f"Download failed: {str(exc)}"
            }
        )
        # Retry with exponential backoff (5s, 25s, 125s)
        countdown = 5 * (2 ** self.request.retries)
        self.retry(exc=exc, countdown=countdown)
    finally:
        # Always close the connection
        if cnx:
            close_direct_db_connection(cnx)
@celery_app.task(bind=True, max_retries=3)
def download_youtube_video_task(self, video_id: int, user_id: int, database_type: str):
    """
    Celery task to download a YouTube video.
    Uses retries with exponential backoff for handling transient failures.

    Args:
        video_id: YouTubeVideos row id of the video to fetch.
        user_id: id of the requesting user.
        database_type: "postgresql" or anything else for MariaDB/MySQL.
    """
    task_id = self.request.id
    print(f"YOUTUBE DOWNLOAD TASK STARTED: ID={task_id}, Video={video_id}, User={user_id}")
    cnx = None
    try:
        # Get a direct connection to fetch the title first
        cnx = get_direct_db_connection()
        cursor = cnx.cursor()
        # Get the video title and channel name
        if database_type == "postgresql":
            # First try to get both the video title and channel name
            query = '''
                SELECT v."videotitle", p."podcastname"
                FROM "YouTubeVideos" v
                JOIN "Podcasts" p ON v."podcastid" = p."podcastid"
                WHERE v."videoid" = %s
            '''
        else:
            query = '''
                SELECT v.VideoTitle, p.PodcastName
                FROM YouTubeVideos v
                JOIN Podcasts p ON v.PodcastID = p.PodcastID
                WHERE v.VideoID = %s
            '''
        cursor.execute(query, (video_id,))
        result = cursor.fetchone()
        cursor.close()
        # Extract video title and channel name; the row may come back as a
        # dict or a tuple depending on the driver/cursor configuration.
        video_title = None
        channel_name = None
        if result:
            if isinstance(result, dict):
                # Dictionary result
                if "videotitle" in result:  # PostgreSQL lowercase
                    video_title = result["videotitle"]
                    channel_name = result.get("podcastname")
                else:  # MariaDB uppercase
                    video_title = result["VideoTitle"]
                    channel_name = result.get("PodcastName")
            else:
                # Tuple result
                video_title = result[0] if len(result) > 0 else None
                channel_name = result[1] if len(result) > 1 else None
        # Format a nice display title, falling back progressively
        display_title = "Unknown Video"
        if video_title and video_title != "None" and video_title.strip():
            display_title = video_title
        elif channel_name:
            display_title = f"{channel_name} - Video"
        else:
            display_title = f"YouTube Video #{video_id}"
        print(f"Using display title for video {video_id}: {display_title}")
        # Register task with more details
        task_manager.register_task(
            task_id=task_id,
            task_type="youtube_download",
            user_id=user_id,
            item_id=video_id,
            details={
                "item_id": video_id,
                "item_title": display_title,
                "status_text": f"Preparing to download {display_title}"
            }
        )
        # Close the connection used for title lookup
        close_direct_db_connection(cnx)
        # Get a fresh connection for the download
        cnx = get_direct_db_connection()
        # Import the download function (deferred to avoid import cycles)
        from database_functions.functions import download_youtube_video
        print(f"Starting download for YouTube video: {video_id} ({display_title}), user: {user_id}, task: {task_id}")
        # Define a progress callback with the display title
        def progress_callback(progress, status=None):
            status_message = ""
            if status == "DOWNLOADING":
                status_message = f"Downloading {display_title}"
            elif status == "PROCESSING":
                status_message = f"Processing {display_title}"
            elif status == "FINALIZING":
                status_message = f"Finalizing {display_title}"
            task_manager.update_task(task_id, progress, status, {
                "item_id": video_id,
                "item_title": display_title,
                "status_text": status_message
            })
        # Check if the download_youtube_video function accepts progress_callback parameter
        import inspect
        try:
            signature = inspect.signature(download_youtube_video)
            has_progress_callback = 'progress_callback' in signature.parameters
        except (TypeError, ValueError):
            has_progress_callback = False
        # Execute the download with progress callback if supported, otherwise without it
        if has_progress_callback:
            success = download_youtube_video(
                cnx,
                database_type,
                video_id,
                user_id,
                task_id,
                progress_callback=progress_callback
            )
        else:
            # Call without the progress_callback parameter
            success = download_youtube_video(
                cnx,
                database_type,
                video_id,
                user_id,
                task_id
            )
        # Since we can't use progress callbacks directly, manually update progress after completion.
        # NOTE(review): this update runs on BOTH branches, not just the
        # no-callback path the comment implies; harmless (complete_task
        # below overwrites it) but confirm whether it should be in the else.
        task_manager.update_task(task_id, 100 if success else 0,
                                 "SUCCESS" if success else "FAILED",
                                 {
                                     "item_id": video_id,
                                     "item_title": display_title,
                                     "status_text": f"{'Download complete' if success else 'Download failed'}"
                                 })
        # Mark task as complete with a nice message
        completion_message = f"{'Successfully downloaded' if success else 'Failed to download'} {display_title}"
        task_manager.complete_task(
            task_id,
            success,
            {
                "item_id": video_id,
                "item_title": display_title,
                "status_text": completion_message
            }
        )
        return success
    except Exception as exc:
        print(f"Error downloading YouTube video {video_id}: {str(exc)}")
        # Mark task as failed but include video title in the details
        task_manager.complete_task(
            task_id,
            False,
            {
                "item_id": video_id,
                "item_title": f"YouTube Video #{video_id}",
                "status_text": f"Download failed: {str(exc)}"
            }
        )
        # Retry with exponential backoff (5s, 25s, 125s)
        countdown = 5 * (2 ** self.request.retries)
        self.retry(exc=exc, countdown=countdown)
    finally:
        # Always close the connection
        if cnx:
            close_direct_db_connection(cnx)
@celery_app.task
def queue_podcast_downloads(podcast_id: int, user_id: int, database_type: str, is_youtube: bool = False):
    """
    Task to queue individual download tasks for all episodes/videos in a podcast.
    This adds downloads to the queue in small batches to prevent overwhelming the system.

    Already-downloaded items (per check_downloaded) are skipped; a 2-second
    pause separates batches of 5.
    """
    cnx = None
    try:
        # Get a direct connection
        cnx = get_direct_db_connection()
        # Deferred import to avoid import cycles at module load time
        from database_functions.functions import (
            get_episode_ids_for_podcast,
            get_video_ids_for_podcast,
            check_downloaded
        )
        if is_youtube:
            item_ids = get_video_ids_for_podcast(cnx, database_type, podcast_id)
            print(f"Queueing {len(item_ids)} YouTube videos for download")
            # Process YouTube items in batches
            batch_size = 5
            for i in range(0, len(item_ids), batch_size):
                batch = item_ids[i:i+batch_size]
                for item_id in batch:
                    if not check_downloaded(cnx, database_type, user_id, item_id, is_youtube):
                        download_youtube_video_task.delay(item_id, user_id, database_type)
                # Add a small delay between batches
                if i + batch_size < len(item_ids):
                    time.sleep(2)
        else:
            # Get episode IDs (should return dicts with id and title)
            episodes = get_episode_ids_for_podcast(cnx, database_type, podcast_id)
            print(f"Queueing {len(episodes)} podcast episodes for download")
            # Process episodes in batches
            batch_size = 5
            for i in range(0, len(episodes), batch_size):
                batch = episodes[i:i+batch_size]
                for episode in batch:
                    # Handle both possible formats (dict or simple ID)
                    if isinstance(episode, dict) and "id" in episode:
                        episode_id = episode["id"]
                    else:
                        # Fall back to treating it as just an ID
                        episode_id = episode
                    if not check_downloaded(cnx, database_type, user_id, episode_id, is_youtube):
                        # Pass just the ID, the task will look up the title
                        download_podcast_task.delay(episode_id, user_id, database_type)
                # Add a small delay between batches
                if i + batch_size < len(episodes):
                    time.sleep(2)
        return f"Queued {len(episodes if not is_youtube else item_ids)} items for download"
    finally:
        if cnx:
            close_direct_db_connection(cnx)
# Helper task to clean up old download records
@celery_app.task
def cleanup_old_downloads():
    """
    Periodic task to clean up old download records from Valkey.

    Currently a stub: cleanup is delegated entirely to the TTLs that
    TaskManager sets on its keys.
    """
    # NOTE(review): valkey_client is imported but unused until a real
    # scan-based cleanup is implemented.
    from database_functions.valkey_client import valkey_client
    # This would need to be implemented with a scan operation
    # For simplicity, we rely on Redis/Valkey TTL mechanisms
    print("Running download cleanup task")
# Example task for refreshing podcast feeds
@celery_app.task(bind=True, max_retries=2)
def refresh_feed_task(self, user_id: int, database_type: str):
    """
    Celery task to refresh podcast feeds for a user.

    NOTE(review): the refresh loop below is simulated placeholder logic
    (fixed count of 10, sleep-based) — it must be replaced with a real
    implementation before being relied on.
    """
    task_id = self.request.id
    cnx = None
    try:
        # Register task
        task_manager.register_task(
            task_id=task_id,
            task_type="feed_refresh",
            user_id=user_id,
            details={"description": "Refreshing podcast feeds"}
        )
        # Get a direct database connection
        cnx = get_direct_db_connection()
        # Get list of podcasts to refresh
        # Then update progress as each one completes
        try:
            # Here you would have your actual feed refresh implementation
            # with periodic progress updates
            task_manager.update_task(task_id, 10, "PROGRESS", {"status_text": "Fetching podcast list"})
            # Simulate feed refresh process with progress updates
            # Replace with your actual implementation
            total_podcasts = 10  # Example count
            for i in range(total_podcasts):
                # Update progress for each podcast
                progress = (i + 1) / total_podcasts * 100
                task_manager.update_task(
                    task_id,
                    progress,
                    "PROGRESS",
                    {"status_text": f"Refreshing podcast {i+1}/{total_podcasts}"}
                )
                # Simulated work - replace with actual refresh logic
                time.sleep(0.5)
            # Complete the task
            task_manager.complete_task(task_id, True, {"refreshed_count": total_podcasts})
            return True
        except Exception as e:
            # NOTE(review): bare re-raise — this inner try/except adds
            # nothing and can be dropped once real refresh logic lands.
            raise e
    except Exception as exc:
        print(f"Error refreshing feeds for user {user_id}: {str(exc)}")
        task_manager.complete_task(task_id, False, {"error": str(exc)})
        self.retry(exc=exc, countdown=30)
    finally:
        # Always close the connection
        if cnx:
            close_direct_db_connection(cnx)
# Simple debug task
@celery_app.task
def debug_task(x, y):
    """Add two values, log the computation, and return the sum."""
    total = x + y
    print(f"CELERY DEBUG TASK EXECUTED: {x} + {y} = {total}")
    return total

View File

@@ -1,778 +0,0 @@
#!/usr/bin/env python3
"""
Database Validator for PinePods
This script validates that an existing database matches the expected schema
by using the migration system as the source of truth.
Usage:
python validate_database.py --db-type mysql --db-host localhost --db-port 3306 --db-user root --db-password pass --db-name pinepods_database
python validate_database.py --db-type postgresql --db-host localhost --db-port 5432 --db-user postgres --db-password pass --db-name pinepods_database
"""
import argparse
import sys
import os
import tempfile
import logging
from typing import Dict, List, Set, Tuple, Any, Optional
from dataclasses import dataclass
import importlib.util
# Add the parent directory to path so we can import database_functions
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parent_dir)
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
try:
import mysql.connector
MYSQL_AVAILABLE = True
except ImportError:
MYSQL_AVAILABLE = False
try:
import psycopg
POSTGRESQL_AVAILABLE = True
except ImportError:
POSTGRESQL_AVAILABLE = False
from database_functions.migrations import get_migration_manager
@dataclass
class TableInfo:
    """Information about a database table"""
    # Table name as reported by the engine
    name: str
    # column name -> attribute dict (engine-specific keys, e.g. 'type', 'null')
    columns: Dict[str, Dict[str, Any]]
    # index name -> {'columns': [...], 'unique': ..., ...}
    indexes: Dict[str, Dict[str, Any]]
    # constraint name -> details (foreign keys etc.)
    constraints: Dict[str, Dict[str, Any]]
@dataclass
class ValidationResult:
    """Result of database validation"""
    # True only when the actual schema fully matches the expected one
    is_valid: bool
    missing_tables: List[str]
    extra_tables: List[str]
    table_differences: Dict[str, Dict[str, Any]]
    missing_indexes: List[Tuple[str, str]]  # (table, index)
    extra_indexes: List[Tuple[str, str]]
    missing_constraints: List[Tuple[str, str]]  # (table, constraint)
    extra_constraints: List[Tuple[str, str]]
    column_differences: Dict[str, Dict[str, Dict[str, Any]]]  # table -> column -> differences
class DatabaseInspector:
    """Abstract helper that extracts schema details from a live database.

    Subclasses implement ``get_tables`` and ``get_table_info`` for a
    specific engine; ``get_all_table_info`` is engine-agnostic.
    """
    def __init__(self, connection):
        self.connection = connection

    def get_tables(self) -> Set[str]:
        """Return the names of every table in the database."""
        raise NotImplementedError

    def get_table_info(self, table_name: str) -> TableInfo:
        """Return column/index/constraint details for one table."""
        raise NotImplementedError

    def get_all_table_info(self) -> Dict[str, TableInfo]:
        """Map each table name to its ``TableInfo``."""
        return {name: self.get_table_info(name) for name in self.get_tables()}
class MySQLInspector(DatabaseInspector):
    """MySQL database inspector"""
    def get_tables(self) -> Set[str]:
        # List every table in the connection's current schema.
        cursor = self.connection.cursor()
        cursor.execute("SHOW TABLES")
        tables = {row[0] for row in cursor.fetchall()}
        cursor.close()
        return tables
    def get_table_info(self, table_name: str) -> TableInfo:
        """Collect column, index, and foreign-key details for one table."""
        cursor = self.connection.cursor(dictionary=True)
        # Get column information
        cursor.execute(f"DESCRIBE `{table_name}`")
        columns = {}
        for row in cursor.fetchall():
            columns[row['Field']] = {
                'type': row['Type'],
                'null': row['Null'],
                'key': row['Key'],
                'default': row['Default'],
                'extra': row['Extra']
            }
        # Get index information; SHOW INDEX emits one row per indexed
        # column, so multi-column indexes are accumulated across rows.
        cursor.execute(f"SHOW INDEX FROM `{table_name}`")
        indexes = {}
        for row in cursor.fetchall():
            index_name = row['Key_name']
            if index_name not in indexes:
                indexes[index_name] = {
                    'columns': [],
                    'unique': not row['Non_unique'],
                    'type': row['Index_type']
                }
            indexes[index_name]['columns'].append(row['Column_name'])
        # Get constraint information (foreign keys, etc.)
        cursor.execute(f"""
            SELECT kcu.CONSTRAINT_NAME, tc.CONSTRAINT_TYPE, kcu.COLUMN_NAME,
                   kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME
            FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE kcu
            JOIN INFORMATION_SCHEMA.TABLE_CONSTRAINTS tc
              ON kcu.CONSTRAINT_NAME = tc.CONSTRAINT_NAME
              AND kcu.TABLE_SCHEMA = tc.TABLE_SCHEMA
            WHERE kcu.TABLE_SCHEMA = DATABASE() AND kcu.TABLE_NAME = %s
              AND kcu.REFERENCED_TABLE_NAME IS NOT NULL
        """, (table_name,))
        constraints = {}
        for row in cursor.fetchall():
            constraint_name = row['CONSTRAINT_NAME']
            constraints[constraint_name] = {
                'type': 'FOREIGN KEY',
                'column': row['COLUMN_NAME'],
                'referenced_table': row['REFERENCED_TABLE_NAME'],
                'referenced_column': row['REFERENCED_COLUMN_NAME']
            }
        cursor.close()
        return TableInfo(table_name, columns, indexes, constraints)
class PostgreSQLInspector(DatabaseInspector):
    """PostgreSQL database inspector"""
    def get_tables(self) -> Set[str]:
        # Only ordinary tables in the public schema (no views, no system tables).
        cursor = self.connection.cursor()
        cursor.execute("""
            SELECT table_name
            FROM information_schema.tables
            WHERE table_schema = 'public' AND table_type = 'BASE TABLE'
        """)
        tables = {row[0] for row in cursor.fetchall()}
        cursor.close()
        return tables
    def get_table_info(self, table_name: str) -> TableInfo:
        """Collect column, index, and constraint details for one table."""
        cursor = self.connection.cursor()
        # Get column information
        cursor.execute("""
            SELECT column_name, data_type, is_nullable, column_default,
                   character_maximum_length, numeric_precision, numeric_scale
            FROM information_schema.columns
            WHERE table_schema = 'public' AND table_name = %s
            ORDER BY ordinal_position
        """, (table_name,))
        columns = {}
        for row in cursor.fetchall():
            col_name, data_type, is_nullable, default, max_length, precision, scale = row
            # Rebuild a display type like varchar(255) / numeric(10,2)
            type_str = data_type
            if max_length:
                type_str += f"({max_length})"
            elif precision:
                if scale:
                    type_str += f"({precision},{scale})"
                else:
                    type_str += f"({precision})"
            columns[col_name] = {
                'type': type_str,
                'null': is_nullable,
                'default': default,
                'max_length': max_length,
                'precision': precision,
                'scale': scale
            }
        # Get index information (column order preserved via ORDINALITY)
        cursor.execute("""
            SELECT i.relname as index_name,
                   array_agg(a.attname ORDER BY c.ordinality) as columns,
                   ix.indisunique as is_unique,
                   ix.indisprimary as is_primary
            FROM pg_class t
            JOIN pg_index ix ON t.oid = ix.indrelid
            JOIN pg_class i ON i.oid = ix.indexrelid
            JOIN unnest(ix.indkey) WITH ORDINALITY c(colnum, ordinality) ON true
            JOIN pg_attribute a ON a.attrelid = t.oid AND a.attnum = c.colnum
            WHERE t.relname = %s AND t.relkind = 'r'
            GROUP BY i.relname, ix.indisunique, ix.indisprimary
        """, (table_name,))
        indexes = {}
        for row in cursor.fetchall():
            index_name, columns_list, is_unique, is_primary = row
            indexes[index_name] = {
                'columns': columns_list,
                'unique': is_unique,
                'primary': is_primary
            }
        # Get constraint information ('contype' is a single-letter code,
        # e.g. 'p' primary, 'f' foreign, 'u' unique, 'c' check)
        cursor.execute("""
            SELECT con.conname as constraint_name,
                   con.contype as constraint_type,
                   array_agg(att.attname) as columns,
                   cl.relname as referenced_table,
                   array_agg(att2.attname) as referenced_columns
            FROM pg_constraint con
            JOIN pg_class t ON con.conrelid = t.oid
            JOIN pg_attribute att ON att.attrelid = t.oid AND att.attnum = ANY(con.conkey)
            LEFT JOIN pg_class cl ON con.confrelid = cl.oid
            LEFT JOIN pg_attribute att2 ON att2.attrelid = cl.oid AND att2.attnum = ANY(con.confkey)
            WHERE t.relname = %s
            GROUP BY con.conname, con.contype, cl.relname
        """, (table_name,))
        constraints = {}
        for row in cursor.fetchall():
            constraint_name, constraint_type, columns_list, ref_table, ref_columns = row
            constraints[constraint_name] = {
                'type': constraint_type,
                'columns': columns_list,
                'referenced_table': ref_table,
                'referenced_columns': ref_columns
            }
        cursor.close()
        return TableInfo(table_name, columns, indexes, constraints)
class DatabaseValidator:
"""Main database validator class"""
    def __init__(self, db_type: str, db_config: Dict[str, Any]):
        """Store connection settings.

        Args:
            db_type: 'mysql', 'mariadb', or 'postgresql' (case-insensitive).
            db_config: keyword arguments for the driver's connect() call.
        """
        self.db_type = db_type.lower()
        # Normalize mariadb to mysql since they use the same connector
        if self.db_type == 'mariadb':
            self.db_type = 'mysql'
        self.db_config = db_config
        self.logger = logging.getLogger(__name__)
    def create_test_database(self) -> Tuple[Any, str]:
        """Create a temporary database and run all migrations.

        Returns:
            (connection to the fresh test database, its generated name).
        """
        if self.db_type == 'mysql':
            return self._create_mysql_test_db()
        elif self.db_type == 'postgresql':
            return self._create_postgresql_test_db()
        else:
            raise ValueError(f"Unsupported database type: {self.db_type}")
    def _create_mysql_test_db(self) -> Tuple[Any, str]:
        """Create MySQL test database, migrate it, and return a fresh
        connection to it plus its name."""
        if not MYSQL_AVAILABLE:
            raise ImportError("mysql-connector-python is required for MySQL validation")
        # Create temporary database name (random suffix avoids collisions)
        import uuid
        test_db_name = f"pinepods_test_{uuid.uuid4().hex[:8]}"
        # Connect to MySQL server
        config = self.db_config.copy()
        config.pop('database', None)  # Remove database from config
        config['use_pure'] = True  # Use pure Python implementation to avoid auth plugin issues
        conn = mysql.connector.connect(**config)
        cursor = conn.cursor()
        try:
            # Create test database
            cursor.execute(f"CREATE DATABASE `{test_db_name}` CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
            cursor.execute(f"USE `{test_db_name}`")
            cursor.close()
            # Run all migrations
            self._run_migrations(conn, 'mysql')
            # Create a fresh connection to the test database for schema inspection
            config['database'] = test_db_name
            test_conn = mysql.connector.connect(**config)
            # Close the migration connection
            conn.close()
            return test_conn, test_db_name
        except Exception as e:
            # Best-effort cleanup of the handles before re-raising
            if cursor:
                cursor.close()
            if conn:
                conn.close()
            raise e
    def _create_postgresql_test_db(self) -> Tuple[Any, str]:
        """Create PostgreSQL test database, migrate it, and return a
        connection to it plus its name."""
        if not POSTGRESQL_AVAILABLE:
            raise ImportError("psycopg is required for PostgreSQL validation")
        # Create temporary database name (random suffix avoids collisions)
        import uuid
        test_db_name = f"pinepods_test_{uuid.uuid4().hex[:8]}"
        # Connect to PostgreSQL server
        config = self.db_config.copy()
        config.pop('dbname', None)  # Remove database from config
        config['dbname'] = 'postgres'  # Connect to default database
        conn = psycopg.connect(**config)
        # CREATE DATABASE cannot run inside a transaction block
        conn.autocommit = True
        cursor = conn.cursor()
        try:
            # Create test database
            cursor.execute(f'CREATE DATABASE "{test_db_name}"')
            cursor.close()
            conn.close()
            # Connect to the new test database
            config['dbname'] = test_db_name
            test_conn = psycopg.connect(**config)
            test_conn.autocommit = True
            # Run all migrations
            self._run_migrations(test_conn, 'postgresql')
            return test_conn, test_db_name
        except Exception as e:
            cursor.close()
            conn.close()
            raise e
    def _run_migrations(self, conn: Any, db_type: str):
        """Run all migrations on the test database using existing migration system.

        The migration manager reads DB_* environment variables, so they are
        temporarily swapped for dummy values and restored afterwards; the
        actual connection is injected directly onto the manager.
        """
        # Set environment variables for the migration manager
        import os
        original_env = {}
        try:
            # Backup original environment
            for key in ['DB_TYPE', 'DB_HOST', 'DB_PORT', 'DB_USER', 'DB_PASSWORD', 'DB_NAME']:
                original_env[key] = os.environ.get(key)
            # Set environment for test database
            if db_type == 'mysql':
                os.environ['DB_TYPE'] = 'mysql'
                os.environ['DB_HOST'] = 'localhost'  # We'll override the connection
                os.environ['DB_PORT'] = '3306'
                os.environ['DB_USER'] = 'test'
                os.environ['DB_PASSWORD'] = 'test'
                os.environ['DB_NAME'] = 'test'
            else:
                os.environ['DB_TYPE'] = 'postgresql'
                os.environ['DB_HOST'] = 'localhost'
                os.environ['DB_PORT'] = '5432'
                os.environ['DB_USER'] = 'test'
                os.environ['DB_PASSWORD'] = 'test'
                os.environ['DB_NAME'] = 'test'
            # Import and register migrations (registration happens on import)
            import database_functions.migration_definitions
            # Get migration manager and override its connection
            manager = get_migration_manager()
            manager._connection = conn
            # Run all migrations
            success = manager.run_migrations()
            if not success:
                raise RuntimeError("Failed to apply migrations")
        finally:
            # Restore original environment
            for key, value in original_env.items():
                if value is not None:
                    os.environ[key] = value
                elif key in os.environ:
                    del os.environ[key]
    def validate_database(self) -> ValidationResult:
        """Validate the actual database against the expected schema.

        Builds a throwaway database from the migrations (the source of
        truth), inspects both schemas, and diffs them.  The temporary
        database is always dropped, even on failure.
        """
        # Create test database with perfect schema
        test_conn, test_db_name = self.create_test_database()
        try:
            # Connect to actual database
            actual_conn = self._connect_to_actual_database()
            try:
                # Get schema information from both databases
                if self.db_type == 'mysql':
                    expected_inspector = MySQLInspector(test_conn)
                    actual_inspector = MySQLInspector(actual_conn)
                    # Extract schemas
                    expected_schema = expected_inspector.get_all_table_info()
                    actual_schema = actual_inspector.get_all_table_info()
                else:
                    # For PostgreSQL, create fresh connection for expected schema since migration manager closes it
                    fresh_test_conn = psycopg.connect(
                        host=self.db_config['host'],
                        port=self.db_config['port'],
                        user=self.db_config['user'],
                        password=self.db_config['password'],
                        dbname=test_db_name
                    )
                    fresh_test_conn.autocommit = True
                    try:
                        expected_inspector = PostgreSQLInspector(fresh_test_conn)
                        actual_inspector = PostgreSQLInspector(actual_conn)
                        # Extract schemas
                        expected_schema = expected_inspector.get_all_table_info()
                        actual_schema = actual_inspector.get_all_table_info()
                    finally:
                        fresh_test_conn.close()
                # DEBUG: Print what we're actually comparing
                print(f"\n🔍 DEBUG: Expected schema has {len(expected_schema)} tables:")
                for table in sorted(expected_schema.keys()):
                    cols = list(expected_schema[table].columns.keys())
                    print(f"  {table}: {len(cols)} columns - {', '.join(cols[:5])}{'...' if len(cols) > 5 else ''}")
                print(f"\n🔍 DEBUG: Actual schema has {len(actual_schema)} tables:")
                for table in sorted(actual_schema.keys()):
                    cols = list(actual_schema[table].columns.keys())
                    print(f"  {table}: {len(cols)} columns - {', '.join(cols[:5])}{'...' if len(cols) > 5 else ''}")
                # Check specifically for Playlists table
                if 'Playlists' in expected_schema and 'Playlists' in actual_schema:
                    exp_cols = set(expected_schema['Playlists'].columns.keys())
                    act_cols = set(actual_schema['Playlists'].columns.keys())
                    print(f"\n🔍 DEBUG: Playlists comparison:")
                    print(f"  Expected columns: {sorted(exp_cols)}")
                    print(f"  Actual columns: {sorted(act_cols)}")
                    print(f"  Missing from actual: {sorted(exp_cols - act_cols)}")
                    print(f"  Extra in actual: {sorted(act_cols - exp_cols)}")
                # Compare schemas
                result = self._compare_schemas(expected_schema, actual_schema)
                return result
            finally:
                actual_conn.close()
        finally:
            # Clean up test database - this will close test_conn
            self._cleanup_test_database(test_conn, test_db_name)
def _connect_to_actual_database(self) -> Any:
    """Open a connection to the real (non-test) database being validated.

    Uses mysql.connector when self.db_type == 'mysql', psycopg otherwise,
    with parameters taken from self.db_config.
    """
    if self.db_type != 'mysql':
        return psycopg.connect(**self.db_config)
    # MySQL path: work on a copy so the shared config dict is not mutated.
    mysql_cfg = dict(self.db_config)
    mysql_cfg['autocommit'] = True  # make statements visible immediately
    mysql_cfg['use_pure'] = True    # pure-Python driver avoids auth plugin issues
    return mysql.connector.connect(**mysql_cfg)
def _cleanup_test_database(self, test_conn: Any, test_db_name: str):
    """Drop the temporary test database and close the connection that built it.

    Cleanup is best-effort: any failure is logged as a warning rather than
    raised, so it cannot mask the validation outcome.
    """
    try:
        # The connection into the test database must be closed before dropping it.
        if test_conn:
            test_conn.close()

        admin_cfg = dict(self.db_config)
        if self.db_type == 'mysql':
            admin_cfg.pop('database', None)
            admin_cfg['use_pure'] = True  # pure-Python driver avoids auth plugin issues
            admin_conn = mysql.connector.connect(**admin_cfg)
            drop_stmt = f"DROP DATABASE IF EXISTS `{test_db_name}`"
        else:
            # Connect to the neutral 'postgres' database to drop the test one.
            admin_cfg.pop('dbname', None)
            admin_cfg['dbname'] = 'postgres'
            admin_conn = psycopg.connect(**admin_cfg)
            admin_conn.autocommit = True  # DROP DATABASE cannot run in a transaction
            drop_stmt = f'DROP DATABASE IF EXISTS "{test_db_name}"'

        cur = admin_conn.cursor()
        cur.execute(drop_stmt)
        cur.close()
        admin_conn.close()
    except Exception as e:
        self.logger.warning(f"Failed to clean up test database {test_db_name}: {e}")
def _compare_schemas(self, expected: Dict[str, TableInfo], actual: Dict[str, TableInfo]) -> ValidationResult:
    """Diff the expected schema against the actual one.

    Only missing tables and missing columns are treated as critical
    (they flip is_valid to False); extra tables/columns, changed column
    attributes, and index/constraint drift are reported but non-fatal.
    """
    expected_names = set(expected)
    actual_names = set(actual)

    missing_tables = list(expected_names - actual_names)
    extra_tables = list(actual_names - expected_names)

    column_differences: Dict[str, Dict[str, Any]] = {}
    missing_indexes = []
    extra_indexes = []
    missing_constraints = []
    extra_constraints = []

    # Walk the tables present on both sides.
    for name in expected_names & actual_names:
        exp_tbl = expected[name]
        act_tbl = actual[name]

        col_diffs = self._compare_columns(exp_tbl.columns, act_tbl.columns)
        if col_diffs:
            column_differences[name] = col_diffs

        exp_idx = set(exp_tbl.indexes.keys())
        act_idx = set(act_tbl.indexes.keys())
        missing_indexes.extend((name, idx) for idx in exp_idx - act_idx)
        extra_indexes.extend((name, idx) for idx in act_idx - exp_idx)

        exp_con = set(exp_tbl.constraints.keys())
        act_con = set(act_tbl.constraints.keys())
        missing_constraints.extend((name, con) for con in exp_con - act_con)
        extra_constraints.extend((name, con) for con in act_con - exp_con)

    # Collect the fatal findings: missing tables plus missing columns
    # (columns are only checked for tables that are expected to exist).
    critical = list(missing_tables)
    for table, col_diffs in column_differences.items():
        if table in extra_tables:
            continue  # extra tables are never validated
        critical.extend(
            f"missing column {col} in table {table}"
            for col, diff in col_diffs.items()
            if diff['status'] == 'missing'
        )

    return ValidationResult(
        is_valid=not critical,
        missing_tables=missing_tables,
        extra_tables=extra_tables,
        table_differences={},  # never populated here; kept for interface compatibility
        missing_indexes=missing_indexes,
        extra_indexes=extra_indexes,
        missing_constraints=missing_constraints,
        extra_constraints=extra_constraints,
        column_differences=column_differences
    )
def _compare_columns(self, expected: Dict[str, Dict[str, Any]], actual: Dict[str, Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
"""Compare column definitions between expected and actual"""
differences = {}
expected_cols = set(expected.keys())
actual_cols = set(actual.keys())
# Missing columns
for missing_col in expected_cols - actual_cols:
differences[missing_col] = {'status': 'missing', 'expected': expected[missing_col]}
# Extra columns
for extra_col in actual_cols - expected_cols:
differences[extra_col] = {'status': 'extra', 'actual': actual[extra_col]}
# Different columns
for col_name in expected_cols & actual_cols:
expected_col = expected[col_name]
actual_col = actual[col_name]
col_diffs = {}
for key in expected_col:
if key in actual_col and expected_col[key] != actual_col[key]:
col_diffs[key] = {'expected': expected_col[key], 'actual': actual_col[key]}
if col_diffs:
differences[col_name] = {'status': 'different', 'differences': col_diffs}
return differences
def print_validation_report(result: ValidationResult):
    """Print a detailed, human-readable validation report to stdout.

    Findings are grouped by severity: missing tables and missing columns
    are critical (these are what make result.is_valid False); extra
    tables, extra/changed columns, and index/constraint drift are shown
    as warnings only.
    """
    print("=" * 80)
    print("DATABASE VALIDATION REPORT")
    print("=" * 80)
    # Count critical vs warning issues
    critical_issues = []
    warning_issues = []
    # Missing tables are critical
    critical_issues.extend(result.missing_tables)
    # Missing columns are critical, others are warnings
    for table, col_diffs in result.column_differences.items():
        for col, diff in col_diffs.items():
            if diff['status'] == 'missing':
                critical_issues.append(f"Missing column {col} in table {table}")
            else:
                # 'extra' and 'different' statuses are tracked as (table, col, diff)
                warning_issues.append((table, col, diff))
    # Extra tables are warnings
    warning_issues.extend([('EXTRA_TABLE', table, None) for table in result.extra_tables])
    if result.is_valid:
        if warning_issues:
            print("✅ DATABASE IS VALID - No critical issues found!")
            print("⚠️  Some warnings exist but don't affect functionality")
        else:
            print("✅ DATABASE IS PERFECT - All checks passed!")
    else:
        print("❌ DATABASE VALIDATION FAILED - Critical issues found")
    print()
    # Show critical issues
    if critical_issues:
        print("🔴 CRITICAL ISSUES (MUST BE FIXED):")
        if result.missing_tables:
            print("   Missing Tables:")
            for table in result.missing_tables:
                print(f"     - {table}")
        # Show missing columns
        for table, col_diffs in result.column_differences.items():
            missing_cols = [col for col, diff in col_diffs.items() if diff['status'] == 'missing']
            if missing_cols:
                print(f"   Missing Columns in {table}:")
                for col in missing_cols:
                    print(f"     - {col}")
        print()
    # Show warnings
    if warning_issues:
        print("⚠️  WARNINGS (ACCEPTABLE DIFFERENCES):")
        if result.extra_tables:
            print("   Extra Tables (ignored):")
            for table in result.extra_tables:
                print(f"     - {table}")
        # Show column warnings (extra or attribute-level differences per table)
        for table, col_diffs in result.column_differences.items():
            table_warnings = []
            for col, diff in col_diffs.items():
                if diff['status'] == 'extra':
                    table_warnings.append(f"Extra column: {col}")
                elif diff['status'] == 'different':
                    details = []
                    for key, values in diff['differences'].items():
                        details.append(f"{key}: {values}")
                    table_warnings.append(f"Different column {col}: {', '.join(details)}")
            if table_warnings:
                print(f"   Table {table}:")
                for warning in table_warnings:
                    print(f"     - {warning}")
        print()
    # Index/constraint drift is informational only and never affects is_valid.
    if result.missing_indexes:
        print("🟡 MISSING INDEXES:")
        for table, index in result.missing_indexes:
            print(f"   - {table}.{index}")
        print()
    if result.extra_indexes:
        print("🟡 EXTRA INDEXES:")
        for table, index in result.extra_indexes:
            print(f"   - {table}.{index}")
        print()
    if result.missing_constraints:
        print("🟡 MISSING CONSTRAINTS:")
        for table, constraint in result.missing_constraints:
            print(f"   - {table}.{constraint}")
        print()
    if result.extra_constraints:
        print("🟡 EXTRA CONSTRAINTS:")
        for table, constraint in result.extra_constraints:
            print(f"   - {table}.{constraint}")
        print()
def main():
    """CLI entry point: parse connection args, validate the schema, report.

    Exit codes: 0 = schema valid, 1 = critical schema issues found,
    2 = the validator itself failed (connection error, etc.).
    """
    parser = argparse.ArgumentParser(description='Validate PinePods database schema')
    parser.add_argument('--db-type', required=True, choices=['mysql', 'mariadb', 'postgresql'], help='Database type')
    parser.add_argument('--db-host', required=True, help='Database host')
    parser.add_argument('--db-port', required=True, type=int, help='Database port')
    parser.add_argument('--db-user', required=True, help='Database user')
    parser.add_argument('--db-password', required=True, help='Database password')
    parser.add_argument('--db-name', required=True, help='Database name')
    parser.add_argument('--verbose', '-v', action='store_true', help='Enable verbose logging')
    args = parser.parse_args()

    logging.basicConfig(
        level=logging.DEBUG if args.verbose else logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
    )

    # Common connection parameters; driver-specific keys are added below.
    db_config = {
        'host': args.db_host,
        'port': args.db_port,
        'user': args.db_user,
        'password': args.db_password,
    }
    if args.db_type in ('mysql', 'mariadb'):
        # MySQL/MariaDB drivers use 'database' plus explicit charset/collation.
        db_config['database'] = args.db_name
        db_config['charset'] = 'utf8mb4'
        db_config['collation'] = 'utf8mb4_unicode_ci'
    else:  # postgresql
        db_config['dbname'] = args.db_name

    try:
        result = DatabaseValidator(args.db_type, db_config).validate_database()
        print_validation_report(result)
        # SystemExit is not an Exception, so the handler below won't catch it.
        sys.exit(0 if result.is_valid else 1)
    except Exception as e:
        logging.error(f"Validation failed with error: {e}")
        if args.verbose:
            import traceback
            traceback.print_exc()
        sys.exit(2)


if __name__ == '__main__':
    main()

View File

@@ -1,149 +0,0 @@
provider "aws" {
  region = "us-east-1" # Choose your preferred region
}

# Database administrator credentials; supply via -var or a tfvars file.
variable "db_username" {
  description = "Database administrator username"
  type        = string
  sensitive   = true
}

variable "db_password" {
  description = "Database administrator password"
  type        = string
  sensitive   = true
}

resource "aws_vpc" "main" {
  cidr_block = "10.0.0.0/16"
}

resource "aws_subnet" "subnet" {
  vpc_id = aws_vpc.main.id
  # Fixed: was 10.15.0.0/24, which lies outside the VPC's 10.0.0.0/16 range
  # and would be rejected by AWS.
  cidr_block = "10.0.15.0/24"
}

# WARNING: allows all traffic from anywhere in both directions, and the RDS
# instance below is publicly accessible. Acceptable for a throwaway demo only.
resource "aws_security_group" "allow_all" {
  vpc_id = aws_vpc.main.id
  ingress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

# Fixed: the resource type is "aws_db_instance" — "aws_rds_instance" does not
# exist in the AWS provider and would fail `terraform init`/`plan`.
resource "aws_db_instance" "default" {
  allocated_storage = 20
  engine            = "postgres"
  engine_version    = "12.5"
  instance_class    = "db.t2.micro"
  # "db_name" replaces the deprecated "name" argument; Postgres database names
  # may not contain hyphens, and this now matches the container's DB_NAME.
  db_name                = "pinepods"
  username               = var.db_username
  password               = var.db_password
  parameter_group_name   = "default.postgres12"
  skip_final_snapshot    = true
  publicly_accessible    = true
  vpc_security_group_ids = [aws_security_group.allow_all.id]
  db_subnet_group_name   = aws_db_subnet_group.main.name
}

resource "aws_db_subnet_group" "main" {
  name       = "main"
  subnet_ids = [aws_subnet.subnet.id]
  tags = {
    Name = "Main subnet group"
  }
}

resource "aws_ecs_cluster" "main" {
  name = "pinepods-cluster"
}

resource "aws_ecs_task_definition" "pinepods" {
  family                   = "pinepods-task"
  network_mode             = "awsvpc"
  requires_compatibilities = ["FARGATE"]
  # Fixed: Fargate sizes are in CPU units / MiB — "1"/"4" are below the
  # minimums (256 CPU, 512 MiB). 1024/4096 = 1 vCPU with 4 GB, a valid pair.
  cpu                = "1024"
  memory             = "4096"
  execution_role_arn = aws_iam_role.ecs_task_execution_role.arn

  # jsonencode guarantees valid JSON; the original heredoc embedded "#"
  # comments, which JSON does not allow, so the task definition could never
  # be registered.
  container_definitions = jsonencode([
    {
      name      = "pinepods"
      image     = "madeofpendletonwool/pinepods" # Change this to your Docker image
      essential = true
      portMappings = [
        {
          containerPort = 80
          hostPort      = 80
        }
      ]
      environment = [
        {
          name  = "DB_HOST"
          value = aws_db_instance.default.address
        },
        {
          # Fixed: was the hardcoded string "admin"; now the same username
          # the RDS instance is actually created with.
          name  = "DB_USER"
          value = var.db_username
        },
        {
          # Fixed: was the hardcoded string "password"; now the same
          # password set on the RDS instance.
          name  = "DB_PASSWORD"
          value = var.db_password
        },
        {
          name  = "DB_NAME"
          value = "pinepods"
        }
      ]
    }
  ])
}

resource "aws_ecs_service" "main" {
  name            = "pinepods-service"
  cluster         = aws_ecs_cluster.main.id
  task_definition = aws_ecs_task_definition.pinepods.arn
  desired_count   = 1
  launch_type     = "FARGATE"
  network_configuration {
    subnets          = [aws_subnet.subnet.id]
    security_groups  = [aws_security_group.allow_all.id]
    assign_public_ip = true
  }
}

# Execution role lets the Fargate agent pull images and write logs.
resource "aws_iam_role" "ecs_task_execution_role" {
  name = "ecs_task_execution_role"
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = "sts:AssumeRole"
        Principal = {
          Service = "ecs-tasks.amazonaws.com"
        }
        Effect = "Allow"
        Sid    = ""
      }
    ]
  })
  managed_policy_arns = [
    "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"
  ]
}

View File

@@ -1,55 +0,0 @@
# Docker Compose stack: MariaDB + Valkey + the Pinepods app.
services:
  db:
    container_name: db
    image: mariadb:12
    command: --wait_timeout=1800
    environment:
      MYSQL_TCP_PORT: 3306
      MYSQL_ROOT_PASSWORD: myS3curepass
      MYSQL_DATABASE: pinepods_database
      MYSQL_COLLATION_SERVER: utf8mb4_unicode_ci
      MYSQL_CHARACTER_SET_SERVER: utf8mb4
      MYSQL_INIT_CONNECT: "SET @@GLOBAL.max_allowed_packet=64*1024*1024;"
    volumes:
      - /home/user/pinepods/sql:/var/lib/mysql
    restart: always

  valkey:
    image: valkey/valkey:8-alpine
    # Added for parity with the db service so the cache survives reboots.
    restart: always

  pinepods:
    image: madeofpendletonwool/pinepods:latest
    ports:
      - "8040:8040"
    environment:
      # Basic Server Info
      SEARCH_API_URL: "https://search.pinepods.online/api/search"
      PEOPLE_API_URL: "https://people.pinepods.online"
      HOSTNAME: "http://localhost:8040"
      # Database Vars
      DB_TYPE: mariadb
      DB_HOST: db
      DB_PORT: 3306
      DB_USER: root
      DB_PASSWORD: myS3curepass
      DB_NAME: pinepods_database
      # Valkey Settings
      VALKEY_HOST: valkey
      VALKEY_PORT: 6379
      # Enable or Disable Debug Mode for additional Printing.
      # Quoted: Compose requires environment values to be strings; an
      # unquoted YAML boolean fails validation on older Compose versions.
      DEBUG_MODE: "false"
      PUID: ${UID:-911}
      PGID: ${GID:-911}
      # Add timezone configuration
      TZ: "America/New_York"
    restart: always
    volumes:
      # Mount the download and backup locations on the server
      - /home/user/pinepods/downloads:/opt/pinepods/downloads
      - /home/user/pinepods/backups:/opt/pinepods/backups
      # Timezone volumes, HIGHLY optional. Read the timezone notes below
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    depends_on:
      - db
      - valkey

View File

@@ -1,60 +0,0 @@
# Docker Compose stack: MySQL + Valkey + the Pinepods app.
services:
  db:
    container_name: db
    image: mysql:9
    command: --wait_timeout=1800
    environment:
      MYSQL_TCP_PORT: 3306
      MYSQL_ROOT_PASSWORD: myS3curepass
      MYSQL_DATABASE: pinepods_database
      MYSQL_COLLATION_SERVER: utf8mb4_unicode_ci
      MYSQL_CHARACTER_SET_SERVER: utf8mb4
      MYSQL_INIT_CONNECT: "SET @@GLOBAL.max_allowed_packet=64*1024*1024;"
    volumes:
      - /home/user/pinepods/sql:/var/lib/mysql
    restart: always

  valkey:
    image: valkey/valkey:8-alpine
    restart: always

  pinepods:
    image: madeofpendletonwool/pinepods:latest
    ports:
      - "8040:8040"
    environment:
      # Basic Server Info
      SEARCH_API_URL: "https://search.pinepods.online/api/search"
      PEOPLE_API_URL: "https://people.pinepods.online"
      HOSTNAME: "http://localhost:8040"
      # Database Vars
      # NOTE(review): the image is mysql:9 but DB_TYPE says mariadb — confirm
      # the app treats the two values identically before changing either.
      DB_TYPE: mariadb
      DB_HOST: db
      DB_PORT: 3306
      DB_USER: root
      DB_PASSWORD: myS3curepass
      DB_NAME: pinepods_database
      # Valkey Settings
      VALKEY_HOST: valkey
      VALKEY_PORT: 6379
      # Enable or Disable Debug Mode for additional Printing.
      # Quoted: Compose requires environment values to be strings; an
      # unquoted YAML boolean fails validation on older Compose versions.
      DEBUG_MODE: "false"
      PUID: ${UID:-911}
      PGID: ${GID:-911}
      # Add timezone configuration
      TZ: "America/New_York"
      # Language Configuration
      DEFAULT_LANGUAGE: "en"
    restart: always
    # Fixed: the original declared "volumes:" twice on this service (the
    # first held only comments) — a duplicate mapping key, which YAML
    # parsers resolve silently as last-wins. Merged into one key.
    volumes:
      # Mount the download and the backup location on the server if you want to. You could mount a nas to the downloads folder or something like that.
      # The backups directory is used if backups are made on the web version on pinepods. When taking backups on the client version it downloads them locally.
      - /home/user/pinepods/downloads:/opt/pinepods/downloads
      - /home/user/pinepods/backups:/opt/pinepods/backups
      # Timezone volumes, HIGHLY optional. Read the timezone notes below
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    depends_on:
      - db
      - valkey

View File

@@ -1,55 +0,0 @@
# Docker Compose stack: PostgreSQL + Valkey + the Pinepods app.
services:
  db:
    container_name: db
    image: postgres:17
    environment:
      POSTGRES_DB: pinepods_database
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: myS3curepass
      PGDATA: /var/lib/postgresql/data/pgdata
    volumes:
      - /home/user/pinepods/pgdata:/var/lib/postgresql/data
    restart: always

  valkey:
    image: valkey/valkey:8-alpine
    restart: always

  pinepods:
    image: madeofpendletonwool/pinepods:latest
    ports:
      - "8040:8040"
    environment:
      # Basic Server Info
      SEARCH_API_URL: "https://search.pinepods.online/api/search"
      PEOPLE_API_URL: "https://people.pinepods.online"
      HOSTNAME: "http://localhost:8040"
      # Database Vars
      DB_TYPE: postgresql
      DB_HOST: db
      DB_PORT: 5432
      DB_USER: postgres
      DB_PASSWORD: myS3curepass
      DB_NAME: pinepods_database
      # Valkey Settings
      VALKEY_HOST: valkey
      VALKEY_PORT: 6379
      # Enable or Disable Debug Mode for additional Printing.
      # Quoted: Compose requires environment values to be strings; an
      # unquoted YAML boolean fails validation on older Compose versions.
      DEBUG_MODE: "false"
      PUID: ${UID:-911}
      PGID: ${GID:-911}
      # Add timezone configuration
      TZ: "America/New_York"
      # Language Configuration
      DEFAULT_LANGUAGE: "en"
    volumes:
      # Mount the download and backup locations on the server
      - /home/user/pinepods/downloads:/opt/pinepods/downloads
      - /home/user/pinepods/backups:/opt/pinepods/backups
      # Timezone volumes, HIGHLY optional. Read the timezone notes below
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    restart: always
    depends_on:
      - db
      - valkey

View File

@@ -1,9 +0,0 @@
# db-secret.yaml
# Database credentials shared by the Postgres and Pinepods deployments.
apiVersion: v1
kind: Secret
metadata:
  name: db-secret
type: Opaque
# Fixed: the original used `data:` with plain-text values — `data` entries
# must be base64-encoded, so the API server rejects the manifest.
# `stringData` takes plain text and is encoded into `data` on admission.
stringData:
  MYSQL_ROOT_PASSWORD: myS3curepass
  DB_PASSWORD: myS3curepass

View File

@@ -1,16 +0,0 @@
# env-configmap.yaml
# Non-secret runtime configuration, consumed via configMapKeyRef in
# pinepods-deployment.yaml.
apiVersion: v1
kind: ConfigMap
metadata:
  name: pinepods-config
data:
  SEARCH_API_URL: "https://search.pinepods.online/api/search"
  # Initial admin account details.
  USERNAME: "myadminuser01"
  # NOTE(review): a plain-text password in a ConfigMap — consider moving it
  # into db-secret; confirm the app can read PASSWORD from a secretKeyRef.
  PASSWORD: "myS3curepass"
  FULLNAME: "Pinepods Admin"
  EMAIL: "user@pinepods.online"
  # DB_HOST must match the Service name in postgres-deployment.yaml.
  DB_TYPE: "postgresql"
  DB_HOST: "postgres"
  DB_PORT: "5432"
  # NOTE(review): "pypods_database" matches POSTGRES_DB in
  # postgres-deployment.yaml, but the compose examples use
  # "pinepods_database" — verify which name the app expects.
  DB_NAME: "pypods_database"
  DEBUG_MODE: "False"

View File

@@ -1,118 +0,0 @@
# pinepods-deployment.yaml
# Pinepods application Deployment plus a NodePort Service exposing it.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pinepods
spec:
  # NOTE(review): replicas > 1 combined with hostPath volumes means each pod
  # writes to its own node's filesystem — confirm this is intended, or switch
  # to a shared PVC.
  replicas: 3
  selector:
    matchLabels:
      app: pinepods
  template:
    metadata:
      labels:
        app: pinepods
    spec:
      containers:
        - name: pinepods
          image: madeofpendletonwool/pinepods:latest
          ports:
            - containerPort: 8040
          env:
            - name: SEARCH_API_URL
              valueFrom:
                configMapKeyRef:
                  name: pinepods-config
                  key: SEARCH_API_URL
            - name: USERNAME
              valueFrom:
                configMapKeyRef:
                  name: pinepods-config
                  key: USERNAME
            - name: PASSWORD
              valueFrom:
                configMapKeyRef:
                  name: pinepods-config
                  key: PASSWORD
            - name: FULLNAME
              valueFrom:
                configMapKeyRef:
                  name: pinepods-config
                  key: FULLNAME
            - name: EMAIL
              valueFrom:
                configMapKeyRef:
                  name: pinepods-config
                  key: EMAIL
            - name: DB_TYPE
              valueFrom:
                configMapKeyRef:
                  name: pinepods-config
                  key: DB_TYPE
            - name: DB_HOST
              valueFrom:
                configMapKeyRef:
                  name: pinepods-config
                  key: DB_HOST
            - name: DB_PORT
              valueFrom:
                configMapKeyRef:
                  name: pinepods-config
                  key: DB_PORT
            - name: DB_USER
              value: postgres
            # Password comes from the Secret, not the ConfigMap.
            - name: DB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: db-secret
                  key: DB_PASSWORD
            - name: DB_NAME
              valueFrom:
                configMapKeyRef:
                  name: pinepods-config
                  key: DB_NAME
            - name: DEBUG_MODE
              valueFrom:
                configMapKeyRef:
                  name: pinepods-config
                  key: DEBUG_MODE
          volumeMounts:
            - name: downloads
              # Fixed: was /opt/pypods/downloads — inconsistent with the
              # backups mount below and the /opt/pinepods/* paths used by
              # every other deployment example.
              mountPath: /opt/pinepods/downloads
            - name: backups
              mountPath: /opt/pinepods/backups
          livenessProbe:
            httpGet:
              path: /api/pinepods_check
              port: 8040
            initialDelaySeconds: 60
            periodSeconds: 30
          readinessProbe:
            httpGet:
              path: /api/pinepods_check
              port: 8040
            initialDelaySeconds: 60
            periodSeconds: 30
      volumes:
        - name: downloads
          hostPath:
            # Fixed: was a developer-specific path (/home/collinp/wait/downloads);
            # aligned with the backups hostPath below.
            path: /home/user/pinepods/downloads
        - name: backups
          hostPath:
            path: /home/user/pinepods/backups
---
apiVersion: v1
kind: Service
metadata:
  name: pinepods-service
spec:
  type: NodePort
  selector:
    app: pinepods
  ports:
    - protocol: TCP
      port: 8040
      targetPort: 8040
      nodePort: 30007 # Adjust the NodePort range as needed

View File

@@ -1,53 +0,0 @@
# postgres-deployment.yaml
# Single-replica PostgreSQL Deployment backed by a PVC, plus the ClusterIP
# Service "postgres" that env-configmap.yaml's DB_HOST points at.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: postgres
spec:
  selector:
    matchLabels:
      app: postgres
  strategy:
    # Recreate avoids two pods holding the same RWO volume during a rollout.
    type: Recreate
  template:
    metadata:
      labels:
        app: postgres
    spec:
      containers:
        # NOTE(review): "latest" is unpinned — a pod restart can silently pull
        # a new major version against the existing data dir; consider pinning.
        - image: postgres:latest
          name: postgres
          env:
            - name: POSTGRES_DB
              value: pypods_database
            - name: POSTGRES_USER
              value: postgres
            # Password is shared with the app via the db-secret Secret.
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: db-secret
                  key: DB_PASSWORD
            - name: PGDATA
              value: /var/lib/postgresql/data/pgdata
          ports:
            - containerPort: 5432
              name: postgres
          volumeMounts:
            - name: postgres-persistent-storage
              mountPath: /var/lib/postgresql/data
      volumes:
        - name: postgres-persistent-storage
          persistentVolumeClaim:
            claimName: postgres-pvc
---
apiVersion: v1
kind: Service
metadata:
  name: postgres
spec:
  ports:
    - port: 5432
  selector:
    app: postgres

View File

@@ -1,24 +0,0 @@
# pv-pvc.yaml
# Static 10Gi hostPath PersistentVolume and the claim postgres-deployment.yaml
# mounts. NOTE(review): hostPath PVs pin data to one node — suitable for
# single-node clusters only.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: postgres-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/home/user/pgdata"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: postgres-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi

View File

@@ -1,9 +0,0 @@
dependencies:
- name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 15.5.14
- name: valkey
repository: https://charts.bitnami.com/bitnami
version: 2.0.1
digest: sha256:283ac4a37bcdaa28adb6114913b92dc5488e215e3f5646165e1304b16abbb746
generated: "2024-11-01T07:54:28.056935878-05:00"

View File

@@ -1,13 +0,0 @@
apiVersion: v2
name: pinepods
version: 0.1.0
description: A Helm chart for deploying Pinepods - A complete podcast management system and allows you to play, download, and keep track of podcasts you enjoy. All self hosted and enjoyed on your own server!
# Bundled Bitnami subcharts; each can be switched off in values.yaml
# (postgresql.enabled / valkey.enabled) to use external services instead.
dependencies:
  - name: postgresql
    version: 15.5.14
    repository: https://charts.bitnami.com/bitnami
    condition: postgresql.enabled
  - name: valkey
    version: 2.0.1
    repository: https://charts.bitnami.com/bitnami
    condition: valkey.enabled

View File

@@ -1,42 +0,0 @@
{{/*
Naming and label helpers shared by every template in the Pinepods chart.
*/}}

{{/* Base chart name, overridable via .Values.nameOverride. */}}
{{- define "pinepods.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Fully qualified app name, truncated to the 63-char DNS label limit.
Skips the release prefix when the release name already contains the chart name.
*/}}
{{- define "pinepods.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/* Service name of the bundled PostgreSQL subchart. */}}
{{- define "pinepods.postgresql.fullname" -}}
{{- printf "%s-postgresql" (include "pinepods.fullname" .) | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/* Service name of the bundled Valkey subchart's primary. */}}
{{- define "pinepods.valkey.fullname" -}}
{{- printf "%s-valkey-primary" (include "pinepods.fullname" .) | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/* Standard chart labels (includes the selector labels below). */}}
{{- define "pinepods.labels" -}}
helm.sh/chart: {{ include "pinepods.chart" . }}
{{ include "pinepods.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/* Immutable selector labels — must stay stable across chart versions. */}}
{{- define "pinepods.selectorLabels" -}}
app.kubernetes.io/name: {{ include "pinepods.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/* chart-version label value; "+" is illegal in label values. */}}
{{- define "pinepods.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

View File

@@ -1,32 +0,0 @@
{{/*
Optional backend (search API) Deployment, rendered only when
.Values.backend.enabled. Its entire environment comes from the companion
backend Secret via envFrom.
*/}}
{{- if .Values.backend.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "pinepods.fullname" . }}-backend
  labels:
    {{- include "pinepods.labels" . | nindent 4 }}
    app.kubernetes.io/component: backend
spec:
  replicas: 1
  selector:
    matchLabels:
      {{- include "pinepods.selectorLabels" . | nindent 6 }}
      app.kubernetes.io/component: backend
  template:
    metadata:
      labels:
        {{- include "pinepods.selectorLabels" . | nindent 8 }}
        app.kubernetes.io/component: backend
    spec:
      containers:
        - name: backend
          image: "{{ .Values.backend.image.repository }}:{{ .Values.backend.image.tag }}"
          imagePullPolicy: {{ .Values.backend.image.pullPolicy }}
          ports:
            - name: http
              containerPort: {{ .Values.backend.service.port }}
              protocol: TCP
          envFrom:
            - secretRef:
                name: {{ include "pinepods.fullname" . }}-backend
{{- end }}

View File

@@ -1,42 +0,0 @@
{{/*
Ingress for the optional backend API; rendered only when both
backend.enabled and backend.ingress.enabled are set.
*/}}
{{- if and .Values.backend.enabled .Values.backend.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ include "pinepods.fullname" . }}-backend
  labels:
    {{- include "pinepods.labels" . | nindent 4 }}
    app.kubernetes.io/component: backend
  {{- with .Values.backend.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.backend.ingress.className }}
  ingressClassName: {{ .Values.backend.ingress.className }}
  {{- end }}
  {{- if .Values.backend.ingress.tls }}
  tls:
    {{- range .Values.backend.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.backend.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            pathType: {{ .pathType }}
            backend:
              service:
                {{/* $ gives the root context inside the range loops. */}}
                name: {{ include "pinepods.fullname" $ }}-backend
                port:
                  number: {{ $.Values.backend.service.port }}
          {{- end }}
    {{- end }}
{{- end }}

View File

@@ -1,13 +0,0 @@
{{/*
Backend API credentials rendered from chart values into an Opaque Secret,
consumed wholesale by the backend Deployment via envFrom.
NOTE(review): these values land in the release manifest in plain text —
consider supporting an existingSecret, as externalDatabase already does.
*/}}
{{- if .Values.backend.enabled }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "pinepods.fullname" . }}-backend
  labels:
    {{- include "pinepods.labels" . | nindent 4 }}
type: Opaque
stringData:
  API_KEY: {{ .Values.backend.secrets.apiKey | quote }}
  API_SECRET: {{ .Values.backend.secrets.apiSecret | quote }}
  YOUTUBE_API_KEY: {{ .Values.backend.secrets.youtubeApiKey | quote }}
{{- end }}

View File

@@ -1,19 +0,0 @@
{{/*
Service fronting the optional backend API Deployment; port and type come
from .Values.backend.service.
*/}}
{{- if .Values.backend.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "pinepods.fullname" . }}-backend
  labels:
    {{- include "pinepods.labels" . | nindent 4 }}
    app.kubernetes.io/component: backend
spec:
  type: {{ .Values.backend.service.type }}
  ports:
    - port: {{ .Values.backend.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "pinepods.selectorLabels" . | nindent 4 }}
    app.kubernetes.io/component: backend
{{- end }}

View File

@@ -1,74 +0,0 @@
{{/*
Main Pinepods application Deployment. Environment comes from the generated
"-env" Secret; DB_PASSWORD is additionally injected from an external secret
when the bundled PostgreSQL is disabled and an existingSecret is configured.
*/}}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "pinepods.fullname" . }}
  labels:
    {{- include "pinepods.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  strategy:
    type: Recreate # Ensures clean volume handling
  selector:
    matchLabels:
      {{- include "pinepods.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "pinepods.selectorLabels" . | nindent 8 }}
        app.kubernetes.io/component: main
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: {{ .Values.service.port }}
              protocol: TCP
          envFrom:
            - secretRef:
                name: {{ include "pinepods.fullname" $ }}-env
          env:
            {{/* Only inject DB_PASSWORD here when using an external DB with a pre-existing secret. */}}
            {{ if (and (not .Values.postgresql.enabled) (.Values.externalDatabase.existingSecret.enabled)) -}}
            - name: DB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ .Values.externalDatabase.existingSecret.name }}
                  key: {{ .Values.externalDatabase.existingSecret.key }}
            {{- end }}
          volumeMounts:
            {{- if .Values.persistence.enabled }}
            - name: downloads
              mountPath: /opt/pinepods/downloads
            - name: backups
              mountPath: /opt/pinepods/backups
            {{- end }}
          livenessProbe:
            httpGet:
              path: /api/pinepods_check
              port: http
            initialDelaySeconds: 120
            periodSeconds: 30
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /api/pinepods_check
              port: http
            initialDelaySeconds: 120
            periodSeconds: 10
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 3
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      volumes:
        {{- if .Values.persistence.enabled }}
        {{/* Prefer user-supplied claims; otherwise use the chart-created PVCs. */}}
        - name: downloads
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.downloads.existingClaim }}{{ .Values.persistence.downloads.existingClaim }}{{ else }}{{ include "pinepods.fullname" . }}-downloads{{ end }}
        - name: backups
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.backups.existingClaim }}{{ .Values.persistence.backups.existingClaim }}{{ else }}{{ include "pinepods.fullname" . }}-backups{{ end }}
        {{- end }}

View File

@@ -1,41 +0,0 @@
{{/*
Ingress for the main Pinepods Service; rendered only when
.Values.ingress.enabled. TLS, class, hosts and paths all come from values.
*/}}
{{- if .Values.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ include "pinepods.fullname" . }}
  labels:
    {{- include "pinepods.labels" . | nindent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.ingress.className }}
  ingressClassName: {{ .Values.ingress.className }}
  {{- end }}
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            pathType: {{ .pathType }}
            backend:
              service:
                {{/* $ gives the root context inside the range loops. */}}
                name: {{ include "pinepods.fullname" $ }}
                port:
                  number: {{ $.Values.service.port }}
          {{- end }}
    {{- end }}
{{- end }}

View File

@@ -1,53 +0,0 @@
{{/*
Optional PodPeople DB Deployment. Points SEARCH_API_URL at the in-cluster
backend Service when backend.enabled, otherwise at the configured external URL.
NOTE(review): ADMIN_PASSWORD is rendered as a plain env var from values —
unlike DB_PASSWORD elsewhere in the chart, it does not use a secretKeyRef;
consider moving it into a Secret.
*/}}
{{- if .Values.podpeople.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "pinepods.fullname" . }}-podpeople
  labels:
    {{- include "pinepods.labels" . | nindent 4 }}
    app.kubernetes.io/component: podpeople
spec:
  replicas: 1
  selector:
    matchLabels:
      {{- include "pinepods.selectorLabels" . | nindent 6 }}
      app.kubernetes.io/component: podpeople
  template:
    metadata:
      labels:
        {{- include "pinepods.selectorLabels" . | nindent 8 }}
        app.kubernetes.io/component: podpeople
    spec:
      containers:
        - name: podpeople
          image: "{{ .Values.podpeople.image.repository }}:{{ .Values.podpeople.image.tag }}"
          imagePullPolicy: {{ .Values.podpeople.image.pullPolicy }}
          ports:
            - name: http
              containerPort: {{ .Values.podpeople.service.port }}
              protocol: TCP
          env:
            - name: ADMIN_USERNAME
              value: {{ .Values.podpeople.auth.adminUsername | quote }}
            - name: ADMIN_PASSWORD
              value: {{ .Values.podpeople.auth.adminPassword | quote }}
            - name: NTFY_URL
              value: {{ .Values.podpeople.environment.ntfyUrl | quote }}
            - name: NTFY_TOPIC
              value: {{ .Values.podpeople.environment.ntfyTopic | quote }}
            - name: BASE_URL
              value: {{ .Values.podpeople.environment.baseurl | quote }}
            - name: SEARCH_API_URL
              {{- if .Values.backend.enabled }}
              value: "http://{{ include "pinepods.fullname" . }}-backend:{{ .Values.backend.service.port }}"
              {{- else }}
              value: {{ .Values.podpeople.environment.searchApiUrl | quote }}
              {{- end }}
          volumeMounts:
            - name: data
              mountPath: /app/podpeople-data
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: {{ if .Values.podpeople.persistence.existingClaim }}{{ .Values.podpeople.persistence.existingClaim }}{{ else }}{{ include "pinepods.fullname" . }}-podpeople{{ end }}
{{- end }}

View File

@@ -1,42 +0,0 @@
{{/*
Ingress for the optional PodPeople component; rendered only when both
podpeople.enabled and podpeople.ingress.enabled are set.
*/}}
{{- if and .Values.podpeople.enabled .Values.podpeople.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ include "pinepods.fullname" . }}-podpeople
  labels:
    {{- include "pinepods.labels" . | nindent 4 }}
    app.kubernetes.io/component: podpeople
  {{- with .Values.podpeople.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.podpeople.ingress.className }}
  ingressClassName: {{ .Values.podpeople.ingress.className }}
  {{- end }}
  {{- if .Values.podpeople.ingress.tls }}
  tls:
    {{- range .Values.podpeople.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.podpeople.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            pathType: {{ .pathType }}
            backend:
              service:
                {{/* $ gives the root context inside the range loops. */}}
                name: {{ include "pinepods.fullname" $ }}-podpeople
                port:
                  number: {{ $.Values.podpeople.service.port }}
          {{- end }}
    {{- end }}
{{- end }}

View File

@@ -1,17 +0,0 @@
{{/*
PVC for PodPeople's data directory; skipped when persistence is disabled
or the user supplies an existingClaim.
*/}}
{{- if and .Values.podpeople.enabled .Values.podpeople.persistence.enabled (not .Values.podpeople.persistence.existingClaim) }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ include "pinepods.fullname" . }}-podpeople
  labels:
    {{- include "pinepods.labels" . | nindent 4 }}
spec:
  accessModes:
    - {{ .Values.podpeople.persistence.accessMode }}
  resources:
    requests:
      storage: {{ .Values.podpeople.persistence.size }}
  {{- if .Values.podpeople.persistence.storageClass }}
  storageClassName: {{ .Values.podpeople.persistence.storageClass }}
  {{- end }}
{{- end }}

View File

@@ -1,19 +0,0 @@
{{/*
Service fronting the PodPeople Deployment; port and type come from
.Values.podpeople.service.
*/}}
{{- if .Values.podpeople.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "pinepods.fullname" . }}-podpeople
  labels:
    {{- include "pinepods.labels" . | nindent 4 }}
    app.kubernetes.io/component: podpeople
spec:
  type: {{ .Values.podpeople.service.type }}
  ports:
    - port: {{ .Values.podpeople.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "pinepods.selectorLabels" . | nindent 4 }}
    app.kubernetes.io/component: podpeople
{{- end }}

View File

@@ -1,36 +0,0 @@
# templates/pvc.yaml
{{- if and .Values.persistence.enabled (not .Values.persistence.downloads.existingClaim) }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ include "pinepods.fullname" . }}-downloads
labels:
{{- include "pinepods.labels" . | nindent 4 }}
spec:
accessModes:
- {{ .Values.persistence.downloads.accessMode }}
resources:
requests:
storage: {{ .Values.persistence.downloads.size }}
{{- if .Values.persistence.downloads.storageClass }}
storageClassName: {{ .Values.persistence.downloads.storageClass }}
{{- end }}
---
{{- end }}
{{- if and .Values.persistence.enabled (not .Values.persistence.backups.existingClaim) }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ include "pinepods.fullname" . }}-backups
labels:
{{- include "pinepods.labels" . | nindent 4 }}
spec:
accessModes:
- {{ .Values.persistence.backups.accessMode }}
resources:
requests:
storage: {{ .Values.persistence.backups.size }}
{{- if .Values.persistence.backups.storageClass }}
storageClassName: {{ .Values.persistence.backups.storageClass }}
{{- end }}
{{- end }}

View File

@@ -1,40 +0,0 @@
{{- /* Set default environment variables. */ -}}
{{ $env := dict -}}
{{ if .Values.postgresql.enabled }}
{{ $_ := set $env "DB_TYPE" "postgresql" }}
{{ $_ := set $env "DB_HOST" (include "pinepods.postgresql.fullname" .) }}
{{ $_ := set $env "DB_PORT" "5432" }}
{{ $_ := set $env "DB_NAME" "pinepods_database" }}
{{ $_ := set $env "DB_USER" "postgres" }}
{{ $_ := set $env "DB_PASSWORD" .Values.postgresql.auth.password }}
{{ else }}
{{ $_ := set $env "DB_TYPE" .Values.externalDatabase.type }}
{{ $_ := set $env "DB_HOST" .Values.externalDatabase.host }}
{{ $_ := set $env "DB_PORT" .Values.externalDatabase.port }}
{{ $_ := set $env "DB_NAME" .Values.externalDatabase.database }}
{{ $_ := set $env "DB_USER" .Values.externalDatabase.user }}
{{ if not .Values.externalDatabase.existingSecret.enabled -}}
{{ $_ := set $env "DB_PASSWORD" .Values.externalDatabase.password }}
{{ end -}}
{{ end -}}
{{ if .Values.valkey.enabled }}
{{ $_ := set $env "VALKEY_HOST" (include "pinepods.valkey.fullname" .) }}
{{ $_ := set $env "VALKEY_PORT" (.Values.valkey.service.port) }}
{{ end -}}
{{- /* Merge in user-specified environment variables, overriding the above. */ -}}
{{ $env := mergeOverwrite $env .Values.env -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "pinepods.fullname" . }}-env
labels:
{{- include "pinepods.labels" . | nindent 4 }}
type: Opaque
stringData:
{{- range $key, $value := $env }}
{{ $key }}: {{ $value | quote }}
{{- end }}

View File

@@ -1,17 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "pinepods.fullname" . }}
labels:
{{- include "pinepods.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: {{ .Values.service.port }}
{{- if and (eq .Values.service.type "NodePort") .Values.service.nodePort }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
selector:
{{- include "pinepods.selectorLabels" . | nindent 4 }}
app.kubernetes.io/component: main

View File

@@ -1,267 +0,0 @@
# Default values for pinepods.
# This is a YAML-formatted file.
## Number of pinepods pods to run
replicaCount: 1
## Container image configuration
image:
# -- Repository to pull the container image from
repository: madeofpendletonwool/pinepods
# -- Tag of the image to pull
# Default uses 'latest' but it's recommended to use a specific version
tag: latest
# -- Image pull policy
# Defaults to IfNotPresent but consider using Always if using latest tag - You know, if you like living on the edge. You could even use nightly.
pullPolicy: IfNotPresent
## Service configuration for exposing the pinepods application
service:
# -- Kubernetes service type
# Valid values are ClusterIP, NodePort, LoadBalancer
type: ClusterIP
# -- Port the service will listen on
port: 8040
# -- Optional nodePort to use when service type is NodePort
# If not set, Kubernetes will automatically allocate one
# nodePort: 30007
## Ingress configuration for exposing the application to external traffic
ingress:
# -- Enable ingress resource
enabled: true
# -- Ingress class name
className: ""
# -- Additional ingress annotations
annotations: {
traefik.ingress.kubernetes.io/router.entrypoints: web
}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# -- Ingress hosts configuration
hosts:
- host: pinepods.mydomain.com
paths:
- path: /
pathType: Prefix
# -- TLS configuration for ingress
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
## Persistence configuration
persistence:
# -- Enable persistent storage
enabled: true
downloads:
# -- Storage class for downloads PVC
# If empty, default StorageClass will be used
storageClass: ""
# -- Access mode for downloads PVC
accessMode: ReadWriteOnce
# -- Size of downloads PVC
size: 5Gi
# -- Use existing PVC for downloads
# If set, a new PVC will not be created
existingClaim: ""
backups:
# -- Storage class for backups PVC
storageClass: ""
# -- Access mode for backups PVC
accessMode: ReadWriteOnce
# -- Size of backups PVC
size: 2Gi
# -- Use existing PVC for backups
existingClaim: ""
## PostgreSQL configuration
postgresql:
# -- Enable PostgreSQL deployment
# Set to false if using external database
enabled: true
auth:
# -- PostgreSQL username
username: postgres
# -- PostgreSQL password
# Consider using a secret for production environments
password: "supersecretpassword"
# -- PostgreSQL database name
database: pinepods_database
# -- PostgreSQL resource configuration
# Default values provide good performance for most deployments
# Increase for larger deployments or high concurrent usage
resources:
requests:
# -- Memory request for PostgreSQL container
memory: 512Mi
# -- CPU request for PostgreSQL container
cpu: 250m
limits:
# -- Memory limit for PostgreSQL container
memory: 2Gi
# -- CPU limit for PostgreSQL container
cpu: 1000m
# Run on control planes if needed
# tolerations:
# - key: "node-role.kubernetes.io/control-plane"
# operator: "Exists"
# effect: "NoSchedule"
persistence:
# -- Enable PostgreSQL persistence
enabled: true
# -- Storage class for PostgreSQL PVC
storageClass: ""
# -- Size of PostgreSQL PVC
size: 3Gi
# -- Use existing PVC for PostgreSQL
existingClaim: ""
# External database configuration
# Only used when postgresql.enabled is false
externalDatabase:
type: postgresql
host: ""
port: 5432
user: postgres
password: ""
database: pinepods_database
existingSecret:
enabled: false
name: existing-secret
key: password
resources: {}
## Valkey configuration
valkey:
# -- Enable Valkey deployment
enabled: true
architecture: standalone # This prevents replica creation
auth:
enabled: false
replica:
replicaCount: 0 # Ensure no replicas are created
primary:
persistence:
enabled: false
# Service configuration
service:
# -- Valkey port
port: 6379
## Application environment variables
env:
# -- Search API URL for podcast search functionality - Change these only if you're hosting the backend and the podcast people database yourself
SEARCH_API_URL: "https://search.pinepods.online/api/search"
PEOPLE_API_URL: "https://people.pinepods.online/api/hosts"
# User Configuration
# -- Default admin username
USERNAME: "admin"
# -- Default admin password
PASSWORD: "password"
# -- Admin full name
FULLNAME: "Admin User"
# -- Admin email address
EMAIL: "admin@example.com"
# Valkey Configuration
# -- Valkey host
# This is automatically set in deployment template - do not change
# VALKEY_HOST: "post-valkey"
# -- Valkey port
# This is automatically set in deployment template - do not change
# VALKEY_PORT: "6379"
# Application Configuration
# -- Debug mode
# Set to true for additional logging
DEBUG_MODE: "false"
## Pod Security Context
securityContext: {}
# fsGroup: 2000
# runAsUser: 1000
# runAsNonRoot: true
## Container Security Context
containerSecurityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
## Node selector for pod assignment
nodeSelector: {}
## Pod tolerations
tolerations: []
## Pod affinity
affinity: {}
## Optional Backend configuration
## backend is the itunes and podcast index search API. This is publically maintained at https://search.pinepods.online. If you want to maintain it yourself you can though
backend:
# -- Enable backend deployment
enabled: true
image:
repository: madeofpendletonwool/pinepods_backend
tag: latest
pullPolicy: IfNotPresent
service:
type: ClusterIP
port: 5000
# -- Backend secrets
secrets:
apiKey: "MYPODCASTINDEXKEY"
apiSecret: "MYPODCASTINDEXSECRET"
youtubeApiKey: "YOUR_YOUTUBE_API_KEY_HERE"
ingress:
enabled: true
className: ""
annotations: {}
hosts:
- host: backend.mydomain.com
paths:
- path: /
pathType: Prefix
tls: []
## PodPeople DB configuration
## Podpeople is a publically available website in which you can get details on guests and hosts for podcasts that don't maintain podcast 2.0 in their feeds.
## If you do want to maintain it yourself you'll probably want to download a copy of the database here: https://podpeople.pinepods.online
podpeople:
# -- Enable PodPeople DB deployment
enabled: true
image:
repository: madeofpendletonwool/podpeople_db
tag: latest
pullPolicy: IfNotPresent
service:
type: ClusterIP
port: 8085
persistence:
enabled: true
storageClass: ""
size: 1Gi
accessMode: ReadWriteOnce
existingClaim: ""
auth:
adminUsername: "admin"
adminPassword: "password"
# Change this only if you aren't hosting the backend. If you aren't you probably want it to be https://search.pinepods.online
searchApiUrl: "http://pinepods-backend:{{ .Values.backend.service.port }}" # Only used if backend.enabled is false
ingress:
enabled: true
className: ""
annotations: {}
hosts:
- host: podpeople.mydomain.com
paths:
- path: /
pathType: Prefix
tls: []

View File

@@ -1,158 +0,0 @@
# Builder stage for compiling the Yew application
FROM rust:alpine AS builder
# Install build dependencies
RUN apk update && apk upgrade && \
apk add --no-cache musl-dev libffi-dev zlib-dev jpeg-dev
RUN apk update && apk upgrade
# Add the Edge Community repository
RUN echo "@edge http://dl-cdn.alpinelinux.org/alpine/edge/community" >> /etc/apk/repositories
# Update the package index
RUN apk update
# Install the desired package from the edge community repository
RUN apk add trunk@edge
# Install wasm target and build tools
RUN rustup target add wasm32-unknown-unknown && \
cargo install wasm-bindgen-cli
# Add application files to the builder stage
COPY ./web/Cargo.lock ./web/Cargo.toml ./web/dev-info.md ./web/index.html ./web/tailwind.config.js ./web/Trunk.toml /app/
COPY ./web/src /app/src
COPY ./web/static /app/static
WORKDIR /app
# Build the Yew application in release mode
RUN RUSTFLAGS="--cfg=web_sys_unstable_apis --cfg getrandom_backend=\"wasm_js\"" trunk build --features server_build --release
# Go builder stage for the gpodder API
FROM golang:alpine AS go-builder
WORKDIR /gpodder-api
# Install build dependencies
RUN apk add --no-cache git
# Copy go module files first for better layer caching
COPY ./gpodder-api/go.mod ./gpodder-api/go.sum ./
RUN go mod download
# Copy the rest of the source code
COPY ./gpodder-api/cmd ./cmd
COPY ./gpodder-api/config ./config
COPY ./gpodder-api/internal ./internal
# Build the application
RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o gpodder-api ./cmd/server/
# Python builder stage for database setup
FROM python:3.11-alpine AS python-builder
WORKDIR /build
# Install build dependencies for PyInstaller and MariaDB connector
RUN apk add --no-cache gcc musl-dev libffi-dev openssl-dev mariadb-connector-c-dev
# Copy Python source files
COPY ./database_functions ./database_functions
COPY ./startup/setup_database_new.py ./startup/setup_database_new.py
COPY ./requirements.txt ./requirements.txt
# Install Python dependencies including PyInstaller
RUN pip install --no-cache-dir -r requirements.txt pyinstaller
# Build standalone database setup binary
RUN pyinstaller --onefile \
--name pinepods-db-setup \
--hidden-import psycopg \
--hidden-import mysql.connector \
--hidden-import cryptography \
--hidden-import cryptography.fernet \
--hidden-import passlib \
--hidden-import passlib.hash \
--hidden-import passlib.hash.argon2 \
--hidden-import argon2 \
--hidden-import argon2.exceptions \
--hidden-import argon2.profiles \
--hidden-import argon2._password_hasher \
--add-data "database_functions:database_functions" \
--console \
startup/setup_database_new.py
# Rust API builder stage
FROM rust:alpine AS rust-api-builder
WORKDIR /rust-api
# Install build dependencies
RUN apk add --no-cache musl-dev pkgconfig openssl-dev openssl-libs-static
# Copy Rust API files
COPY ./rust-api/Cargo.toml ./rust-api/Cargo.lock ./
COPY ./rust-api/src ./src
# Set environment for static linking
ENV OPENSSL_STATIC=1
ENV OPENSSL_LIB_DIR=/usr/lib
ENV OPENSSL_INCLUDE_DIR=/usr/include
# Build the Rust API
RUN cargo build --release && strip target/release/pinepods-api
# Final stage for setting up runtime environment
FROM alpine
# Metadata
LABEL maintainer="Collin Pendleton <collinp@collinpendleton.com>"
# Install runtime dependencies
RUN apk add --no-cache tzdata nginx openssl bash mariadb-client postgresql-client curl ffmpeg wget jq mariadb-connector-c-dev
# Download and install latest yt-dlp binary (musllinux for Alpine)
RUN LATEST_VERSION=$(curl -s https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest | jq -r .tag_name) && \
wget -O /usr/local/bin/yt-dlp "https://github.com/yt-dlp/yt-dlp/releases/download/${LATEST_VERSION}/yt-dlp_musllinux" && \
chmod +x /usr/local/bin/yt-dlp
# Download and install Horust (x86_64)
RUN wget -O /tmp/horust.tar.gz "https://github.com/FedericoPonzi/Horust/releases/download/v0.1.7/horust-x86_64-unknown-linux-musl.tar.gz" && \
cd /tmp && tar -xzf horust.tar.gz && \
mv horust /usr/local/bin/ && \
chmod +x /usr/local/bin/horust && \
rm -f /tmp/horust.tar.gz
ENV TZ=UTC
# Copy compiled database setup binary (replaces Python dependency)
COPY --from=python-builder /build/dist/pinepods-db-setup /usr/local/bin/
# Copy built files from the builder stage to the Nginx serving directory
COPY --from=builder /app/dist /var/www/html/
# Copy translation files for the Rust API to access
COPY ./web/src/translations /var/www/html/static/translations
# Copy Go API binary from the go-builder stage
COPY --from=go-builder /gpodder-api/gpodder-api /usr/local/bin/
# Copy Rust API binary from the rust-api-builder stage
COPY --from=rust-api-builder /rust-api/target/release/pinepods-api /usr/local/bin/
# Move to the root directory to execute the startup script
WORKDIR /
# Copy startup scripts
COPY startup/startup.sh /startup.sh
RUN chmod +x /startup.sh
# Copy Pinepods runtime files
RUN mkdir -p /pinepods
RUN mkdir -p /var/log/pinepods/ && mkdir -p /etc/horust/services/
COPY startup/ /pinepods/startup/
# Legacy cron scripts removed - background tasks now handled by internal Rust scheduler
COPY clients/ /pinepods/clients/
COPY database_functions/ /pinepods/database_functions/
RUN chmod +x /pinepods/startup/startup.sh
ENV APP_ROOT=/pinepods
# Define the build argument
ARG PINEPODS_VERSION
# Write the Pinepods version to the current_version file
RUN echo "${PINEPODS_VERSION}" > /pinepods/current_version
# Configure Nginx
COPY startup/nginx.conf /etc/nginx/nginx.conf
# Copy script to start gpodder API
COPY ./gpodder-api/start-gpodder.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/start-gpodder.sh
RUN cp /usr/share/zoneinfo/UTC /etc/localtime && \
echo "UTC" > /etc/timezone
# Expose ports
EXPOSE 8080 8000
# Start everything using the startup script
ENTRYPOINT ["bash", "/startup.sh"]

View File

@@ -1,182 +0,0 @@
# Builder stage for compiling the Yew application
FROM rust:alpine AS builder
# Install build dependencies
RUN apk update && apk upgrade && \
apk add --no-cache musl-dev libffi-dev zlib-dev jpeg-dev
RUN apk update && apk upgrade
# Add the Edge Community repository
RUN echo "@edge http://dl-cdn.alpinelinux.org/alpine/edge/community" >> /etc/apk/repositories
# Update the package index
RUN apk update
# Install the desired package from the edge community repository
RUN apk add trunk@edge
# Install wasm target and build tools
RUN rustup target add wasm32-unknown-unknown && \
cargo install wasm-bindgen-cli && \
cargo install horust --locked
# Test wasm-bindgen installation before full build
RUN echo "Testing wasm-bindgen installation..." && \
which wasm-bindgen && \
wasm-bindgen --version && \
ls -la /usr/local/cargo/bin/ && \
echo "wasm-bindgen test completed"
# Test trunk installation
RUN echo "Testing trunk installation..." && \
which trunk && \
trunk --version && \
echo "trunk test completed"
# Add application files to the builder stage
COPY ./web/Cargo.lock ./web/Cargo.toml ./web/dev-info.md ./web/index.html ./web/tailwind.config.js ./web/Trunk.toml /app/
COPY ./web/src /app/src
COPY ./web/static /app/static
WORKDIR /app
# Test that trunk can find wasm-bindgen before full build
RUN echo "Testing if trunk can find wasm-bindgen..." && \
RUST_LOG=debug trunk build --help && \
echo "trunk can find wasm-bindgen"
# Auto-detect wasm-bindgen version and replace trunk's glibc binary with our musl one
RUN WASM_BINDGEN_VERSION=$(grep -A1 "name = \"wasm-bindgen\"" /app/Cargo.lock | grep "version = " | cut -d'"' -f2) && \
echo "Detected wasm-bindgen version: $WASM_BINDGEN_VERSION" && \
RUSTFLAGS="--cfg=web_sys_unstable_apis --cfg getrandom_backend=\"wasm_js\"" timeout 30 trunk build --features server_build --release || \
(echo "Build failed as expected, replacing downloaded binary..." && \
mkdir -p /root/.cache/trunk/wasm-bindgen-$WASM_BINDGEN_VERSION && \
cp /usr/local/cargo/bin/wasm-bindgen /root/.cache/trunk/wasm-bindgen-$WASM_BINDGEN_VERSION/ && \
echo "Retrying build with musl binary..." && \
RUSTFLAGS="--cfg=web_sys_unstable_apis --cfg getrandom_backend=\"wasm_js\"" trunk build --features server_build --release)
# Go builder stage for the gpodder API
FROM golang:alpine AS go-builder
WORKDIR /gpodder-api
# Install build dependencies
RUN apk add --no-cache git
# Copy go module files first for better layer caching
COPY ./gpodder-api/go.mod ./gpodder-api/go.sum ./
RUN go mod download
# Copy the rest of the source code
COPY ./gpodder-api/cmd ./cmd
COPY ./gpodder-api/config ./config
COPY ./gpodder-api/internal ./internal
# Build the application
RUN CGO_ENABLED=0 GOOS=linux go build -o gpodder-api ./cmd/server/
# Python builder stage for database setup
FROM python:3.11-alpine AS python-builder
WORKDIR /build
# Install build dependencies for PyInstaller and MariaDB connector
RUN apk add --no-cache gcc musl-dev libffi-dev openssl-dev mariadb-connector-c-dev
# Copy Python source files
COPY ./database_functions ./database_functions
COPY ./startup/setup_database_new.py ./startup/setup_database_new.py
COPY ./requirements.txt ./requirements.txt
# Install Python dependencies including PyInstaller
RUN pip install --no-cache-dir -r requirements.txt pyinstaller
# Build standalone database setup binary
RUN pyinstaller --onefile \
--name pinepods-db-setup \
--hidden-import psycopg \
--hidden-import mysql.connector \
--hidden-import cryptography \
--hidden-import cryptography.fernet \
--hidden-import passlib \
--hidden-import passlib.hash \
--hidden-import passlib.hash.argon2 \
--hidden-import argon2 \
--hidden-import argon2.exceptions \
--hidden-import argon2.profiles \
--hidden-import argon2._password_hasher \
--add-data "database_functions:database_functions" \
--console \
startup/setup_database_new.py
# Rust API builder stage
FROM rust:alpine AS rust-api-builder
WORKDIR /rust-api
# Install build dependencies
RUN apk add --no-cache musl-dev pkgconfig openssl-dev openssl-libs-static
# Copy Rust API files
COPY ./rust-api/Cargo.toml ./rust-api/Cargo.lock ./
COPY ./rust-api/src ./src
# Set environment for static linking
ENV OPENSSL_STATIC=1
ENV OPENSSL_LIB_DIR=/usr/lib
ENV OPENSSL_INCLUDE_DIR=/usr/include
# Build the Rust API
RUN cargo build --release
# Final stage for setting up runtime environment
FROM alpine
# Metadata
LABEL maintainer="Collin Pendleton <collinp@collinpendleton.com>"
# Install runtime dependencies
RUN apk add --no-cache tzdata nginx openssl bash mariadb-client postgresql-client curl ffmpeg wget jq mariadb-connector-c-dev
# Download and install latest yt-dlp binary for ARM64 (musllinux for Alpine)
RUN LATEST_VERSION=$(curl -s https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest | jq -r .tag_name) && \
wget -O /usr/local/bin/yt-dlp "https://github.com/yt-dlp/yt-dlp/releases/download/${LATEST_VERSION}/yt-dlp_musllinux_aarch64" && \
chmod +x /usr/local/bin/yt-dlp
# Copy Horust binary from builder stage
COPY --from=builder /usr/local/cargo/bin/horust /usr/local/bin/
ENV TZ=UTC
# Copy compiled database setup binary (replaces Python dependency)
COPY --from=python-builder /build/dist/pinepods-db-setup /usr/local/bin/
# Copy built files from the builder stage to the Nginx serving directory
COPY --from=builder /app/dist /var/www/html/
# Copy translation files for the Rust API to access
COPY ./web/src/translations /var/www/html/static/translations
# Copy Go API binary from the go-builder stage
COPY --from=go-builder /gpodder-api/gpodder-api /usr/local/bin/
# Copy Rust API binary from the rust-api-builder stage
COPY --from=rust-api-builder /rust-api/target/release/pinepods-api /usr/local/bin/
# Move to the root directory to execute the startup script
WORKDIR /
# Copy startup scripts
COPY startup/startup.sh /startup.sh
RUN chmod +x /startup.sh
# Copy Pinepods runtime files
RUN mkdir -p /pinepods
RUN mkdir -p /var/log/pinepods/ && mkdir -p /etc/horust/services/
COPY startup/ /pinepods/startup/
# Legacy cron scripts removed - background tasks now handled by internal Rust scheduler
COPY clients/ /pinepods/clients/
COPY database_functions/ /pinepods/database_functions/
RUN chmod +x /pinepods/startup/startup.sh
ENV APP_ROOT=/pinepods
# Define the build argument
ARG PINEPODS_VERSION
# Write the Pinepods version to the current_version file
RUN echo "${PINEPODS_VERSION}" > /pinepods/current_version
# Configure Nginx
COPY startup/nginx.conf /etc/nginx/nginx.conf
# Copy script to start gpodder API
COPY ./gpodder-api/start-gpodder.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/start-gpodder.sh
RUN cp /usr/share/zoneinfo/UTC /etc/localtime && \
echo "UTC" > /etc/timezone
# Expose ports
EXPOSE 8080 8000
# Start everything using the startup script
ENTRYPOINT ["bash", "/startup.sh"]

View File

@@ -1 +0,0 @@
helm.pinepods.online

View File

@@ -1,118 +0,0 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Pinepods Helm Chart</title>
<style>
body {
font-family: Arial, sans-serif;
background-color: #f4f4f4;
color: #333;
margin: 0;
padding: 20px;
}
.container {
max-width: 800px;
margin: auto;
background: white;
padding: 20px;
box-shadow: 0 0 10px rgba(0, 0, 0, 0.1);
}
h1 {
text-align: center;
}
img {
display: block;
margin: 0 auto;
}
pre {
background: #eee;
padding: 10px;
border-radius: 5px;
}
code {
background: #f9f9f9;
padding: 2px 4px;
border-radius: 3px;
}
a {
color: #1a73e8;
}
</style>
</head>
<body>
<div class="container">
<h1>Pinepods Helm Chart</h1>
<img src="pinepods.png" alt="Pinepods Logo" width="200" />
<p>
Welcome to the Pinepods Helm chart repository. Follow the
instructions below to use the Helm chart.
</p>
<h2>Adding the Repository</h2>
<pre><code>helm repo add pinepods http://helm.pinepods.online/
helm repo update</code></pre>
<h2>Create the namespace</h2>
<pre><code>kubectl create namespace pinepods-namespace</code></pre>
<h2>Customizing Values</h2>
<p>
Create a <code>my-values.yaml</code> file to override default
values:
</p>
<pre><code>replicaCount: 2
image:
repository: pinepods
tag: latest
pullPolicy: IfNotPresent
service:
type: NodePort
port: 8040
nodePort: 30007
persistence:
enabled: true
accessMode: ReadWriteOnce
size: 10Gi
postgresql:
enabled: true
auth:
username: postgres
password: "supersecretpassword"
database: pinepods_database
primary:
persistence:
enabled: true
existingClaim: postgres-pvc
env:
SEARCH_API_URL: "https://search.pinepods.online/api/search"
USERNAME: "admin"
PASSWORD: "password"
FULLNAME: "Admin User"
EMAIL: "admin@example.com"
DB_TYPE: "postgresql"
DB_HOST: "pinepods-postgresql.pinepods-namespace.svc.cluster.local"
DB_PORT: "5432"
DB_USER: "postgres"
DB_NAME: "pinepods_database"
DEBUG_MODE: "false"</code></pre>
<h2>Installing the Chart</h2>
<pre><code>helm install pinepods pinepods/pinepods -f my-values.yaml --namespace pinepods-namespace</code></pre>
<h2>More Information</h2>
<p>
For more information, visit the
<a href="https://github.com/madeofpendletonwool/pinepods"
>GitHub repository</a
>.
</p>
</div>
</body>
</html>

View File

@@ -1,264 +0,0 @@
apiVersion: v1
entries:
pinepods:
- apiVersion: v2
created: "2025-10-30T11:52:41.683959727Z"
dependencies:
- condition: postgresql.enabled
name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 15.5.14
- condition: valkey.enabled
name: valkey
repository: https://charts.bitnami.com/bitnami
version: 2.0.1
description: A Helm chart for deploying Pinepods - A complete podcast management
system and allows you to play, download, and keep track of podcasts you enjoy.
All self hosted and enjoyed on your own server!
digest: 28e32586ecbdfc1749890007055c61add7b78076cee90980d425113b38b13b9c
name: pinepods
urls:
- https://helm.pinepods.online/pinepods-0.8.1.tgz
version: 0.8.1
- apiVersion: v2
created: "2025-10-30T11:52:41.67456305Z"
dependencies:
- condition: postgresql.enabled
name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 15.5.14
- condition: valkey.enabled
name: valkey
repository: https://charts.bitnami.com/bitnami
version: 2.0.1
description: A Helm chart for deploying Pinepods - A complete podcast management
system and allows you to play, download, and keep track of podcasts you enjoy.
All self hosted and enjoyed on your own server!
digest: e08c788d3d225ca3caef37c030f27d7c25cd4ecc557f7fe2f32215ff7f164ba8
name: pinepods
urls:
- https://helm.pinepods.online/pinepods-0.7.8.tgz
version: 0.7.8
- apiVersion: v2
created: "2025-10-30T11:52:41.665123097Z"
dependencies:
- condition: postgresql.enabled
name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 15.5.14
- condition: valkey.enabled
name: valkey
repository: https://charts.bitnami.com/bitnami
version: 2.0.1
description: A Helm chart for deploying Pinepods - A complete podcast management
system and allows you to play, download, and keep track of podcasts you enjoy.
All self hosted and enjoyed on your own server!
digest: 2956c727f65099059680638f2529f472affe25cbd9d6ad90b593dbf7444d6648
name: pinepods
urls:
- https://helm.pinepods.online/pinepods-0.7.7.tgz
version: 0.7.7
- apiVersion: v2
created: "2025-10-30T11:52:41.655783861Z"
dependencies:
- condition: postgresql.enabled
name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 15.5.14
- condition: valkey.enabled
name: valkey
repository: https://charts.bitnami.com/bitnami
version: 2.0.1
description: A Helm chart for deploying Pinepods - A complete podcast management
system and allows you to play, download, and keep track of podcasts you enjoy.
All self hosted and enjoyed on your own server!
digest: 7b954cac8ed6cdff756090d56fc3c98342f6ca944922f2c910b7e20fb338ce5a
name: pinepods
urls:
- https://helm.pinepods.online/pinepods-0.7.6.tgz
version: 0.7.6
- apiVersion: v2
created: "2025-10-30T11:52:41.646315323Z"
dependencies:
- condition: postgresql.enabled
name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 15.5.14
- condition: valkey.enabled
name: valkey
repository: https://charts.bitnami.com/bitnami
version: 2.0.1
description: A Helm chart for deploying Pinepods - A complete podcast management
system and allows you to play, download, and keep track of podcasts you enjoy.
All self hosted and enjoyed on your own server!
digest: a69290e9e9051ac4442f19bea78cca08bdd91a831e6c91af06d48eb6b9a07409
name: pinepods
urls:
- https://helm.pinepods.online/pinepods-0.7.5.tgz
version: 0.7.5
- apiVersion: v2
created: "2025-10-30T11:52:41.636858843Z"
dependencies:
- condition: postgresql.enabled
name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 15.5.14
- condition: valkey.enabled
name: valkey
repository: https://charts.bitnami.com/bitnami
version: 2.0.1
description: A Helm chart for deploying Pinepods - A complete podcast management
system and allows you to play, download, and keep track of podcasts you enjoy.
All self hosted and enjoyed on your own server!
digest: 2f5f97abadf581a025315cb288c139274b4411c86f1e46e52944f89e76c23a9c
name: pinepods
urls:
- https://helm.pinepods.online/pinepods-0.7.4.tgz
version: 0.7.4
- apiVersion: v2
created: "2025-10-30T11:52:41.626710376Z"
dependencies:
- condition: postgresql.enabled
name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 15.5.14
- condition: valkey.enabled
name: valkey
repository: https://charts.bitnami.com/bitnami
version: 2.0.1
description: A Helm chart for deploying Pinepods - A complete podcast management
system and allows you to play, download, and keep track of podcasts you enjoy.
All self hosted and enjoyed on your own server!
digest: 47208597c5b52c4d8c9fb659416fe7d679f6fc5a099d9b37caaae0ecfeb33dde
name: pinepods
urls:
- https://helm.pinepods.online/pinepods-0.7.3.tgz
version: 0.7.3
- apiVersion: v2
created: "2025-10-30T11:52:41.61704642Z"
dependencies:
- condition: postgresql.enabled
name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 15.5.14
- condition: valkey.enabled
name: valkey
repository: https://charts.bitnami.com/bitnami
version: 2.0.1
description: A Helm chart for deploying Pinepods - A complete podcast management
system and allows you to play, download, and keep track of podcasts you enjoy.
All self hosted and enjoyed on your own server!
digest: ef86847694c2291c9ebcd4ec40d4f4680c5b675aafc7c82693c3119f5ae4b43b
name: pinepods
urls:
- https://helm.pinepods.online/pinepods-0.7.2.tgz
version: 0.7.2
- apiVersion: v2
created: "2025-10-30T11:52:41.607497209Z"
dependencies:
- condition: postgresql.enabled
name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 15.5.14
- condition: valkey.enabled
name: valkey
repository: https://charts.bitnami.com/bitnami
version: 2.0.1
description: A Helm chart for deploying Pinepods - A complete podcast management
system and allows you to play, download, and keep track of podcasts you enjoy.
All self hosted and enjoyed on your own server!
digest: 07ba1a2859213a3542e03aa922d0dfd0f61d146cd3938e85216a740c1ee90bd4
name: pinepods
urls:
- https://helm.pinepods.online/pinepods-0.7.1.tgz
version: 0.7.1
- apiVersion: v2
created: "2025-10-30T11:52:41.597450971Z"
dependencies:
- condition: postgresql.enabled
name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 15.5.14
- condition: valkey.enabled
name: valkey
repository: https://charts.bitnami.com/bitnami
version: 2.0.1
description: A Helm chart for deploying Pinepods - A complete podcast management
system and allows you to play, download, and keep track of podcasts you enjoy.
All self hosted and enjoyed on your own server!
digest: 1815802cc08ed83c3eaedd2ac28d6fbd044a817e22c7e8cda4af38bffcde9d82
name: pinepods
urls:
- https://helm.pinepods.online/pinepods-0.7.0.tgz
version: 0.7.0
- apiVersion: v2
created: "2025-10-30T11:52:41.588288452Z"
dependencies:
- name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 15.5.14
description: A Helm chart for deploying Pinepods - A complete podcast management
system and allows you to play, download, and keep track of podcasts you enjoy.
All self hosted and enjoyed on your own server!
digest: c994e0c57c47448718cc849d103ab91f21a427b7580a0ef0b4c4decc613a04ad
name: pinepods
urls:
- https://helm.pinepods.online/pinepods-0.6.6.tgz
version: 0.6.6
- apiVersion: v2
created: "2025-10-30T11:52:41.584369055Z"
dependencies:
- name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 15.5.14
description: A Helm chart for deploying Pinepods - A complete podcast management
system and allows you to play, download, and keep track of podcasts you enjoy.
All self hosted and enjoyed on your own server!
digest: 82f6f1d9569626aab1920bc574b3616c77aadbf4c9b6bc1d5cc5f51cc3bc41f2
name: pinepods
urls:
- https://helm.pinepods.online/pinepods-0.6.5.tgz
version: 0.6.5
- apiVersion: v2
created: "2025-10-30T11:52:41.579496258Z"
dependencies:
- name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 15.5.14
description: A Helm chart for deploying Pinepods - A complete podcast management
system and allows you to play, download, and keep track of podcasts you enjoy.
All self hosted and enjoyed on your own server!
digest: c2ab21d0cb61a2e809432f762aca608723365d87e8840d02b29c08bc105c9a31
name: pinepods
urls:
- https://helm.pinepods.online/pinepods-0.6.4.tgz
version: 0.6.4
- apiVersion: v2
created: "2025-10-30T11:52:41.574870899Z"
dependencies:
- name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 15.5.14
description: A Helm chart for deploying Pinepods - A complete podcast management
system and allows you to play, download, and keep track of podcasts you enjoy.
All self hosted and enjoyed on your own server!
digest: 3a0a1b86a6fb22888fead9fc12b84e8845a55ff6736775445028d181318860bf
name: pinepods
urls:
- https://helm.pinepods.online/pinepods-0.6.3.tgz
version: 0.6.3
- apiVersion: v2
created: "2025-10-30T11:52:41.57094979Z"
dependencies:
- name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 15.5.14
description: A Helm chart for deploying Pinepods - A complete podcast management
system and allows you to play, download, and keep track of podcasts you enjoy.
All self hosted and enjoyed on your own server!
digest: f7148434be4b395aaab8bb76fbe1584f7de8fb981b2471506b33ff11be85f706
name: pinepods
urls:
- https://helm.pinepods.online/pinepods-0.6.2.tgz
version: 0.6.2
generated: "2025-10-30T11:52:41.565858306Z"

Binary file not shown.

Before

Width:  |  Height:  |  Size: 33 KiB

View File

@@ -1,18 +0,0 @@
PinePods is a complete podcast management solution that allows you to host your own podcast server and enjoy a beautiful mobile experience.
Features:
• Self-hosted podcast server synchronization
• Beautiful, intuitive mobile interface
• Download episodes for offline listening
• Chapter support with navigation
• Playlist management
• User statistics and listening history
• Multi-device synchronization
• Search and discovery
• Background audio playback
• Sleep timer and playback speed controls
PinePods gives you complete control over your podcast experience while providing the convenience of modern podcast apps. Perfect for users who want privacy, control, and a great listening experience.
Note: This app requires a PinePods server to be set up. Visit the PinePods GitHub repository for server installation instructions.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.7 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 38 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 297 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 305 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 386 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 206 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 660 KiB

View File

@@ -1 +0,0 @@
A beautiful, self-hosted podcast app with powerful server synchronization

View File

@@ -1 +0,0 @@
PinePods

Some files were not shown because too many files have changed in this diff Show More