```yaml
destination:
  type: "s3"
  bucket: "backup-bucket"
  path: "database/{date}/"
  lifecycle: "delete after 90 days"
```
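The `{date}` placeholder would be expanded at transfer time. A minimal sketch of that expansion, assuming a UTC `YYYY-MM-DD` format (the actual template grammar and timezone behavior are not specified above):

```python
from datetime import datetime, timezone

# Illustrative only: expand the {date} placeholder from the destination path.
# The real template syntax and date format are assumptions.
template = "database/{date}/"
path = template.format(date=datetime.now(timezone.utc).strftime("%Y-%m-%d"))
print(path)  # e.g. database/2024-01-15/
```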
```python
# Python SDK
from filecatalyst import Profile, Orchestrator
```
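The import is all this section shows of the SDK. A hypothetical sketch of how `Profile` and `Orchestrator` might fit together (every method name below is an assumption, not the documented API):

```python
from filecatalyst import Profile, Orchestrator

# Hypothetical usage only: load a profile definition and hand it to the
# orchestrator. Neither method name is confirmed by the docs above.
profile = Profile.from_yaml("new_config.yaml")  # assumed constructor
orchestrator = Orchestrator()
orchestrator.submit(profile)                    # assumed scheduling call
```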
```yaml
# Exclude patterns
exclude:
  - "*.tmp"
  - "*.partial"
  - "/.*/"   # hidden files
```
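The glob behavior can be sanity-checked locally. A sketch using Python's `fnmatch` (an approximation of the matcher, simplified to basename matching; FileCatalyst's exact pattern semantics are not specified above):

```python
from fnmatch import fnmatch

# Approximate the exclude rules above with shell-style globbing; the
# hidden-files rule is simplified here from "/.*/" to ".*" on basenames.
EXCLUDE = ["*.tmp", "*.partial", ".*"]

def is_excluded(name: str) -> bool:
    return any(fnmatch(name, pat) for pat in EXCLUDE)

assert is_excluded("upload.partial")
assert is_excluded(".hidden")
assert not is_excluded("report.pdf")
```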
```yaml
notifications:
  on_success: "slack:#backups - Success: {size} transferred"
  on_failure:
    - "pagerduty:Database Backup Failed"
    - "email:db-admin@company.com"
```
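The notification strings above follow a `channel:detail` shape. A sketch of how such targets could be split for routing (the dispatcher itself is an illustrative assumption):

```python
# Illustrative parser for "channel:detail" notification targets as
# formatted above; the real routing logic is an assumption.
def parse_target(target: str) -> tuple[str, str]:
    channel, _, detail = target.partition(":")
    return channel, detail

assert parse_target("pagerduty:Database Backup Failed") == (
    "pagerduty", "Database Backup Failed")
assert parse_target("email:db-admin@company.com") == (
    "email", "db-admin@company.com")
```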
FileCatalyst profiles support per-profile bandwidth management with priority queuing:

| Priority | Bandwidth Guarantee         | Use Case                    |
|----------|-----------------------------|-----------------------------|
| Critical | Min 50%, burst to 100%      | DR replication, live events |
| High     | Min 30%, burst to 70%       | Business data sync          |
| Normal   | Min 15%, burst to 50%       | End-user transfers          |
| Low      | Min 5%, unlimited when idle | Backup, archival            |
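A sketch of how those guarantees could translate into per-class allocations on a shared link (the allocation model is an assumption; the table only fixes the minimum and burst shares):

```python
# Illustrative allocation per the table above: each class gets its minimum
# share of demand first, then leftover capacity goes to the highest-priority
# class with unmet demand, up to its burst ceiling.
LINK_MBPS = 1000
CLASSES = [  # (name, min_share, burst_share); 1.0 = 100% of the link
    ("critical", 0.50, 1.00),
    ("high",     0.30, 0.70),
    ("normal",   0.15, 0.50),
    ("low",      0.05, 1.00),  # "unlimited when idle"
]

def allocate(demand: dict[str, float]) -> dict[str, float]:
    alloc = {n: min(demand.get(n, 0), m * LINK_MBPS) for n, m, _ in CLASSES}
    spare = LINK_MBPS - sum(alloc.values())
    for name, _, burst in CLASSES:  # highest priority first
        extra = min(spare, demand.get(name, 0) - alloc[name],
                    burst * LINK_MBPS - alloc[name])
        if extra > 0:
            alloc[name] += extra
            spare -= extra
    return alloc

# With only critical and low traffic present, low absorbs the idle capacity:
print(allocate({"critical": 800, "low": 500}))
# {'critical': 800, 'high': 0, 'normal': 0, 'low': 200}
```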
```yaml
transfer:
  adaptive_bandwidth: true
  min_bandwidth: "20Mbps"
  max_bandwidth: "200Mbps"
  compression: "zstd"
  encryption: "AES-256-GCM"
  parallel_chunks: 8
  verify_checksum: "SHA-256"
```
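What `parallel_chunks` plus `verify_checksum` imply, sketched end to end: split a file into chunks, hash each, and compare digests after transfer. Chunk sizing and per-chunk (rather than whole-file) hashing are assumptions, not documented behavior:

```python
import hashlib
from concurrent.futures import ThreadPoolExecutor

CHUNKS = 8  # parallel_chunks from the config above

def chunk_digests(path: str, chunks: int = CHUNKS) -> list[str]:
    """SHA-256 of each fixed-size slice of the file, hashed in parallel."""
    data = open(path, "rb").read()
    size = max(1, -(-len(data) // chunks))  # ceiling division
    parts = [data[i:i + size] for i in range(0, len(data), size)]
    with ThreadPoolExecutor(max_workers=chunks) as pool:
        return list(pool.map(lambda p: hashlib.sha256(p).hexdigest(), parts))

# The receiver recomputes the digests and compares; a mismatch pinpoints
# which chunk to retransmit instead of resending the whole file.
```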
```bash
# CLI examples
filecatalyst profile create --from-template backup \
  --source s3://my-bucket/ \
  --dest /backup/ \
  --schedule "0 3 * * *"

filecatalyst profile apply --profile marketing_sync --override bandwidth=200Mbps

filecatalyst profile test --profile new_config.yaml --dry-run
```
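A common pattern is to gate `profile apply` on a passing `--dry-run` check, for example as a CI step before a new profile is scheduled; whether the dry run signals validation failure through its exit code is not documented above and would need to be verified.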