[{"data":1,"prerenderedAt":2693},["ShallowReactive",2],{"docs-synology-hyper-backup-eu-s3":3,"docs-related-synology-hyper-backup-eu-s3":502},{"id":4,"title":5,"body":6,"date":485,"description":486,"extension":487,"meta":488,"navigation":489,"path":490,"seo":491,"sitemap":492,"stem":493,"tags":494,"tool":498,"__hash__":501},"docs/docs/synology-hyper-backup-eu-s3.md","Synology Hyper Backup — Scheduled NAS Backups with Versioning and Encryption",{"type":7,"value":8,"toc":471},"minimark",[9,13,18,42,46,63,67,80,94,98,101,197,203,214,220,224,231,238,242,246,293,302,328,332,336,342,365,372,381,385,388,402,406,416,423,430,444,449,466],[10,11,12],"p",{},"Synology's native backup application for DSM. Block-level incremental backups, client-side encryption, multi-version retention, and integrity checking. Backs up shared folders and supported applications (Surveillance Station, Hyper Backup Vault, etc.) to S3-compatible object storage.",[14,15,17],"h3",{"id":16},"_1-prerequisites","1. Prerequisites",[19,20,21,30,36,39],"ul",{},[22,23,24,25,29],"li",{},"Synology NAS running ",[26,27,28],"strong",{},"DSM 7.0 or later",".",[22,31,32,35],{},[26,33,34],{},"Hyper Backup"," installed from Package Center (pre-installed on most models — search \"Hyper Backup\" if missing).",[22,37,38],{},"An admin account on the NAS.",[22,40,41],{},"Your HummingTribe S3 credentials and bucket name.",[14,43,45],{"id":44},"_2-get-your-s3-credentials","2. Get your S3 credentials",[10,47,48,49,54,55,58,59,62],{},"Log in to your ",[50,51,53],"a",{"href":52},"/dashboard","HummingTribe dashboard"," → S3 Storage tab. Copy your ",[26,56,57],{},"Access Key ID"," and reveal your ",[26,60,61],{},"Secret Access Key"," (shown once — save it now). Note your bucket name.",[14,64,66],{"id":65},"_3-create-a-new-backup-task","3. Create a new backup task",[10,68,69,70,72,73,76,77,29],{},"Open ",[26,71,34],{}," from the DSM main menu. Click the ",[26,74,75],{},"+"," icon in the bottom-left → ",[26,78,79],{},"Data backup task",[10,81,82,83,86,87,90,91,29],{},"In the destination wizard, scroll to ",[26,84,85],{},"File Server"," → select ",[26,88,89],{},"S3 Storage"," → ",[26,92,93],{},"Next",[14,95,97],{"id":96},"_4-configure-the-s3-connection","4. 
Configure the S3 connection",[10,99,100],{},"Fill in the fields:",[102,103,104,117],"table",{},[105,106,107],"thead",{},[108,109,110,114],"tr",{},[111,112,113],"th",{},"Field",[111,115,116],{},"Value",[118,119,120,132,142,152,162,170,178,186],"tbody",{},[108,121,122,126],{},[123,124,125],"td",{},"S3 Server",[123,127,128],{},[129,130,131],"code",{},"Custom Server URL",[108,133,134,137],{},[123,135,136],{},"Server address",[123,138,139],{},[129,140,141],{},"storage.hummingtribe.com",[108,143,144,147],{},[123,145,146],{},"Signature version",[123,148,149],{},[129,150,151],{},"v4",[108,153,154,157],{},[123,155,156],{},"Request style",[123,158,159],{},[129,160,161],{},"Path-style",[108,163,164,167],{},[123,165,166],{},"Access Key",[123,168,169],{},"your Access Key ID",[108,171,172,175],{},[123,173,174],{},"Secret Key",[123,176,177],{},"your Secret Access Key",[108,179,180,183],{},[123,181,182],{},"Bucket name",[123,184,185],{},"your bucket name from the dashboard",[108,187,188,191],{},[123,189,190],{},"Directory",[123,192,193,196],{},[129,194,195],{},"synology-backup"," (or any subfolder name — Hyper Backup will create it)",[10,198,199,202],{},[26,200,201],{},"Path-style is required"," — HummingTribe S3 does not support virtual-hosted-style requests.",[10,204,205,206,209,210,213],{},"If creating a new task, leave ",[26,207,208],{},"Create backup task"," selected. If you are reconnecting to an existing relink-able backup, choose ",[26,211,212],{},"Relink to existing task"," instead.",[10,215,216,217,219],{},"Click ",[26,218,93],{},". Hyper Backup verifies the connection and creates the destination directory.",[14,221,223],{"id":222},"_5-select-source-data","5. Select source data",[10,225,226,227,230],{},"Tick the ",[26,228,229],{},"shared folders"," to back up. Expand each folder to include or exclude specific subfolders if needed.",[10,232,233,234,237],{},"On the ",[26,235,236],{},"Application Backup"," screen, tick any DSM applications you want included (e.g. Hyper Backup Vault, Surveillance Station configuration, Note Station). Application data is backed up consistently — Hyper Backup quiesces the application before reading.",[10,239,216,240,29],{},[26,241,93],{},[14,243,245],{"id":244},"_6-configure-task-settings","6. Configure task settings",[19,247,248,257,263,269,275],{},[22,249,250,253,254],{},[26,251,252],{},"Task name:"," e.g. ",[129,255,256],{},"NAS → HummingTribe",[22,258,259,262],{},[26,260,261],{},"Enable task notification:"," tick this and configure notification settings under Control Panel → Notification if you want email alerts on success/failure.",[22,264,265,268],{},[26,266,267],{},"Compress backup data:"," tick this — saves storage and bandwidth.",[22,270,271,274],{},[26,272,273],{},"Enable transfer encryption:"," tick this — uses TLS for the connection (separate from client-side encryption below).",[22,276,277,280,281,284,285,288,289,292],{},[26,278,279],{},"Enable client-side encryption:"," ",[26,282,283],{},"tick this",". Set a strong ",[26,286,287],{},"Encryption key"," and confirm it. Download the encryption key file (",[129,290,291],{},".pem",") when prompted and store it somewhere safe — outside the NAS.",[10,294,295,298,299,301],{},[26,296,297],{},"If you lose the encryption key, your backups are unrecoverable."," Synology has no way to reset or recover it. Store the ",[129,300,291],{}," file in a password manager or printed in a secure location.",[19,303,304,318],{},[22,305,306,309,310,313,314,317],{},[26,307,308],{},"Enable backup schedule:"," tick this. 
Set frequency to ",[26,311,312],{},"Daily"," at a time outside business hours (e.g. 02:00). For active NAS environments, set ",[26,315,316],{},"Run backup every:"," to a shorter interval if needed.",[22,319,320,323,324,327],{},[26,321,322],{},"Enable integrity check schedule:"," tick this and set to ",[26,325,326],{},"Monthly",". Hyper Backup will periodically download and verify a sample of backup blocks against the remote.",[10,329,216,330,29],{},[26,331,93],{},[14,333,335],{"id":334},"_7-configure-rotation-and-versioning","7. Configure rotation and versioning",[10,337,233,338,341],{},[26,339,340],{},"Rotation"," screen:",[19,343,344,350,359],{},[22,345,346,349],{},[26,347,348],{},"Enable backup rotation:"," tick this.",[22,351,352,280,355,358],{},[26,353,354],{},"Rotation scheme:",[26,356,357],{},"Smart Recycle"," is the recommended default — keeps hourly versions for 24 hours, daily for a month, weekly thereafter, with automatic thinning.",[22,360,361,364],{},[26,362,363],{},"Number of versions:"," set the maximum (e.g. 256). Hyper Backup deletes the oldest version when this limit is reached.",[10,366,367,368,371],{},"For simpler retention, choose ",[26,369,370],{},"From the earliest versions"," — keeps the N most recent versions and discards older ones.",[10,373,216,374,377,378,29],{},[26,375,376],{},"Apply",". Hyper Backup creates the task and prompts you to run the first backup immediately — click ",[26,379,380],{},"Yes",[14,382,384],{"id":383},"_8-run-the-first-backup-and-verify","8. Run the first backup and verify",[10,386,387],{},"The first backup uploads all selected data and may take hours or days depending on dataset size and upload bandwidth. Monitor progress in the Hyper Backup main window — the task status shows current file, transfer rate, and estimated time remaining.",[10,389,390,391,394,395,90,398,401],{},"After completion, run a manual integrity check: select the task → click the ",[26,392,393],{},"gear icon"," (bottom-left) → ",[26,396,397],{},"Backup Integrity Check",[26,399,400],{},"OK",". Hyper Backup downloads a sample of backup data and verifies its integrity against the local index.",[14,403,405],{"id":404},"_9-restore-from-backup","9. Restore from backup",[10,407,408,409,412,413,29],{},"From the Hyper Backup main window, select the task → click the ",[26,410,411],{},"clock icon"," (top-right) → ",[26,414,415],{},"Data",[10,417,418,419,422],{},"The browser shows your shared folders as they existed at each backup point. Use the ",[26,420,421],{},"time slider"," at the top to choose a restore point.",[10,424,425,426,429],{},"Tick the files or folders to restore → click ",[26,427,428],{},"Restore",". Choose:",[19,431,432,438],{},[22,433,434,437],{},[26,435,436],{},"Restore to original location"," — overwrites current files",[22,439,440,443],{},[26,441,442],{},"Restore to"," — pick a different folder on the NAS",[10,445,216,446,448],{},[26,447,376],{}," to start the restore.",[10,450,451,452,90,454,456,457,459,460,462,463,465],{},"To restore on a different Synology NAS (disaster recovery): install Hyper Backup on the new NAS → ",[26,453,75],{},[26,455,79],{}," → choose ",[26,458,89],{}," → enter the same connection settings → on the wizard, choose ",[26,461,212],{}," → provide your encryption key (",[129,464,291],{}," file or passphrase). 
### 8. Run the first backup and verify

The first backup uploads all selected data and may take hours or days depending on dataset size and upload bandwidth. Monitor progress in the Hyper Backup main window — the task status shows current file, transfer rate, and estimated time remaining.

After completion, run a manual integrity check: select the task → click the **gear icon** (bottom-left) → **Backup Integrity Check** → **OK**. Hyper Backup downloads a sample of backup data and verifies its integrity against the local index.
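You can also confirm from outside DSM that data actually landed in the bucket. A hedged check with the AWS CLI (same placeholder bucket, plus the `synology-backup` directory from step 4); the objects are Hyper Backup's internal format, so expect opaque file names:

```bash
# Count the objects and total size Hyper Backup has written so far.
aws s3 ls s3://my-bucket/synology-backup/ --recursive --summarize \
  --endpoint-url https://storage.hummingtribe.com | tail -n 3
```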
",[129,647,648],{},"my-bucket",")",[10,651,216,652,655],{},[26,653,654],{},"More Options"," to expand advanced settings:",[19,657,658,666],{},[22,659,660,280,663],{},[26,661,662],{},"Transfer Files:",[129,664,665],{},"Use browser connection",[22,667,668,671],{},[26,669,670],{},"Connect Mode:"," leave default",[10,673,674],{},"Close the bookmark editor — Cyberduck saves automatically.",[14,676,678],{"id":677},"_4-connect-and-authenticate","4. Connect and authenticate",[10,680,681,682,684,685,688,689,692],{},"Double-click the bookmark in the main browser window. Cyberduck prompts for your ",[26,683,61],{}," — paste it and tick ",[26,686,687],{},"Add to Keychain"," (macOS) or ",[26,690,691],{},"Save Password"," (Windows) so you don't have to re-enter it.",[10,694,216,695,698],{},[26,696,697],{},"Login",". Cyberduck connects to HummingTribe and shows the contents of your bucket. An empty bucket shows a blank file list.",[14,700,702],{"id":701},"_5-upload-and-download-files","5. Upload and download files",[10,704,705,708,709,712,713,578,716,719],{},[26,706,707],{},"Upload:"," drag files or folders from Finder/Explorer into the Cyberduck window. Transfers run in the ",[26,710,711],{},"Transfers"," window (",[129,714,715],{},"Cmd+T",[129,717,718],{},"Ctrl+T",") with per-file progress.",[10,721,722,725,726,729],{},[26,723,724],{},"Download:"," drag files from Cyberduck to your desktop, or right-click → ",[26,727,728],{},"Download To..."," to pick a destination.",[10,731,732,735],{},[26,733,734],{},"Resume interrupted transfers:"," Cyberduck automatically detects partial transfers and offers to resume on next connect.",[10,737,738,739,90,742,90,744,90,747,750],{},"For large multi-gigabyte uploads, Cyberduck uses S3 multipart uploads automatically. The default chunk size is 10 MB — adjust under ",[26,740,741],{},"Preferences",[26,743,711],{},[26,745,746],{},"General",[26,748,749],{},"Multipart download/upload"," if you need to.",[14,752,754],{"id":753},"_6-browse-and-inspect-backups","6. Browse and inspect backups",[10,756,757],{},"Cyberduck is the easiest way to verify what your backup tools have written:",[19,759,760,766,776,788],{},[22,761,762,765],{},[26,763,764],{},"Path navigation:"," click into folders to drill down. Use the breadcrumb bar at the top to jump back up.",[22,767,768,771,772,775],{},[26,769,770],{},"File info:"," right-click any file → ",[26,773,774],{},"Info"," to see size, modification date, storage class, and S3 metadata.",[22,777,778,280,781,578,784,787],{},[26,779,780],{},"Search:",[129,782,783],{},"Cmd+F",[129,785,786],{},"Ctrl+F"," filters the current folder by name.",[22,789,790,793],{},[26,791,792],{},"Sort:"," click column headers (Filename, Size, Modified) to sort.",[10,795,796,797,800,801,804],{},"This is particularly useful for confirming restic snapshot directories, rclone sync results, or Duplicati ",[129,798,799],{},".dblock"," and ",[129,802,803],{},".dindex"," files are present in the bucket.",[14,806,808],{"id":807},"_7-restore-a-single-file","7. Restore a single file",[10,810,811],{},"Restoring an individual file from a structured backup (restic, Duplicati) requires the original tool — those tools store data in their own internal format and cannot be browsed file-by-file in Cyberduck.",[10,813,814,815,818],{},"Cyberduck restores work for files uploaded ",[26,816,817],{},"directly"," as files (e.g. via rclone copy, or manual uploads). 
To restore:",[820,821,822,825,830],"ol",{},[22,823,824],{},"Navigate to the file in the bucket.",[22,826,827,828],{},"Right-click → ",[26,829,728],{},[22,831,832,833,29],{},"Choose a local destination → ",[26,834,835],{},"Choose",[10,837,838,839,841],{},"For an entire folder, right-click the folder → ",[26,840,728],{}," — Cyberduck downloads the folder tree recursively.",[14,843,845],{"id":844},"_8-optional-client-side-encryption-with-cryptomator","8. Optional — client-side encryption with Cryptomator",[10,847,848,849,852],{},"Cyberduck integrates with ",[26,850,851],{},"Cryptomator"," for transparent client-side encryption. Files are encrypted on your machine before upload — HummingTribe never sees the plaintext.",[10,854,855,856,859],{},"Install Cryptomator from ",[26,857,858],{},"cryptomator.org",". Then in Cyberduck:",[820,861,862,865,871,881],{},[22,863,864],{},"Connect to your bucket (step 4).",[22,866,867,868,29],{},"Right-click in the browser pane → ",[26,869,870],{},"New Encrypted Vault",[22,872,873,874,877,878,29],{},"Choose a vault name (e.g. ",[129,875,876],{},"vault",") and a strong ",[26,879,880],{},"passphrase",[22,882,883],{},"Cyberduck creates the Cryptomator vault structure in your bucket.",[10,885,886],{},"After creation, Cyberduck shows a virtual unlocked vault. Files dragged in are encrypted before upload; files dragged out are decrypted on download. The vault passphrase is required on every reconnection.",[10,888,889,892],{},[26,890,891],{},"If you lose the vault passphrase, the files are unrecoverable."," Cryptomator has no recovery mechanism. Store the passphrase in a password manager.",[10,894,895],{},"This is a useful pattern for sensitive ad-hoc files — but for full automated backups, use restic or Duplicati's built-in encryption instead.",[14,897,899],{"id":898},"_9-sync-one-off-manual","9. Sync (one-off, manual)",[10,901,902,903,906,907,909,910,913,914,917,918,921],{},"Cyberduck has a ",[26,904,905],{},"Synchronize"," feature (right-click bookmark → ",[26,908,905],{},") that compares a local folder to a remote folder and offers three modes: ",[26,911,912],{},"Download"," (remote → local), ",[26,915,916],{},"Upload"," (local → remote), or ",[26,919,920],{},"Mirror"," (both directions).",[10,923,924,925,928],{},"This is useful for occasional one-off sync operations, but ",[26,926,927],{},"it is not scheduled and not incremental"," — every sync rescans the entire folder tree. For automated, deduplicated, scheduled sync, use rclone instead.",[10,930,468,931,29],{},[50,932,53],{"href":52},{"title":472,"searchDepth":473,"depth":473,"links":934},[935,936,937,938,939,940,941,942,943],{"id":517,"depth":476,"text":518},{"id":44,"depth":476,"text":45},{"id":562,"depth":476,"text":563},{"id":677,"depth":476,"text":678},{"id":701,"depth":476,"text":702},{"id":753,"depth":476,"text":754},{"id":807,"depth":476,"text":808},{"id":844,"depth":476,"text":845},{"id":898,"depth":476,"text":899},"Use Cyberduck to browse, upload, and restore files in HummingTribe S3. 
### 5. Upload and download files

**Upload:** drag files or folders from Finder/Explorer into the Cyberduck window. Transfers run in the **Transfers** window (`Cmd+T` / `Ctrl+T`) with per-file progress.

**Download:** drag files from Cyberduck to your desktop, or right-click → **Download To...** to pick a destination.

**Resume interrupted transfers:** Cyberduck automatically detects partial transfers and offers to resume on next connect.

For large multi-gigabyte uploads, Cyberduck uses S3 multipart uploads automatically. The default chunk size is 10 MB — adjust under **Preferences** → **Transfers** → **General** → **Multipart download/upload** if you need to.

### 6. Browse and inspect backups

Cyberduck is the easiest way to verify what your backup tools have written:

- **Path navigation:** click into folders to drill down. Use the breadcrumb bar at the top to jump back up.
- **File info:** right-click any file → **Info** to see size, modification date, storage class, and S3 metadata.
- **Search:** `Cmd+F` / `Ctrl+F` filters the current folder by name.
- **Sort:** click column headers (Filename, Size, Modified) to sort.

This is particularly useful for confirming restic snapshot directories, rclone sync results, or Duplicati `.dblock` and `.dindex` files are present in the bucket.
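The same inspection can be scripted when you would rather not click through the GUI. A sketch using the AWS CLI (assumed installed; `my-bucket` is a placeholder):

```bash
# Show the newest objects in the bucket, handy for confirming that last
# night's backup actually wrote something. Listing output starts with a
# date and time column, so sorting on those fields orders by age.
aws s3 ls s3://my-bucket/ --recursive \
  --endpoint-url https://storage.hummingtribe.com | sort -k1,2 | tail -n 20
```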
Configure the S3 destination",[10,1109,233,1110,1113,1114,1117,1118,29],{},[26,1111,1112],{},"Destination"," screen, set ",[26,1115,1116],{},"Storage Type"," to ",[26,1119,1120],{},"S3 Compatible",[10,1122,100],{},[102,1124,1125,1133],{},[105,1126,1127],{},[108,1128,1129,1131],{},[111,1130,113],{},[111,1132,116],{},[118,1134,1135,1144,1152,1158,1166,1176,1188,1195,1202],{},[108,1136,1137,1139],{},[123,1138,617],{},[123,1140,1141],{},[129,1142,1143],{},"Custom server URL",[108,1145,1146,1148],{},[123,1147,1143],{},[123,1149,1150],{},[129,1151,141],{},[108,1153,1154,1156],{},[123,1155,182],{},[123,1157,185],{},[108,1159,1160,1163],{},[123,1161,1162],{},"Bucket create region",[123,1164,1165],{},"leave blank",[108,1167,1168,1171],{},[123,1169,1170],{},"Storage class",[123,1172,1173],{},[129,1174,1175],{},"(Default)",[108,1177,1178,1181],{},[123,1179,1180],{},"Folder path",[123,1182,1183,1184,1187],{},"leave blank (or e.g. ",[129,1185,1186],{},"laptop-backup"," for a subfolder)",[108,1189,1190,1193],{},[123,1191,1192],{},"AWS Access ID",[123,1194,169],{},[108,1196,1197,1200],{},[123,1198,1199],{},"AWS Access Key",[123,1201,177],{},[108,1203,1204,1207],{},[123,1205,1206],{},"Client library to use",[123,1208,1209],{},[129,1210,1211],{},"Amazon AWS SDK",[10,1213,216,1214,1217,1218,29],{},[26,1215,1216],{},"Test connection",". Duplicati will verify credentials and confirm the bucket is reachable. If prompted to use path-style URLs, accept — HummingTribe requires path-style access. Click ",[26,1219,93],{},[14,1221,1223],{"id":1222},"_6-select-source-data","6. Select source data",[10,1225,1226],{},"Expand the filesystem tree and tick the folders you want to back up. Typical selections:",[19,1228,1229,1243,1254],{},[22,1230,1231,280,1233,1236,1237,1236,1240],{},[26,1232,531],{},[129,1234,1235],{},"C:\\Users\\\u003Cname>\\Documents",", ",[129,1238,1239],{},"Desktop",[129,1241,1242],{},"Pictures",[22,1244,1245,280,1247,1236,1250,1236,1252],{},[26,1246,541],{},[129,1248,1249],{},"/Users/\u003Cname>/Documents",[129,1251,1239],{},[129,1253,1242],{},[22,1255,1256,280,1259],{},[26,1257,1258],{},"Linux:",[129,1260,1261],{},"/home/\u003Cuser>",[10,1263,1264,1265,1268,1269,29],{},"Use the ",[26,1266,1267],{},"Filters"," tab to exclude caches, virtual machines, or large files you don't need backed up. Click ",[26,1270,93],{},[14,1272,1274],{"id":1273},"_7-set-schedule","7. Set schedule",[10,1276,1277,1278,1281],{},"Enable ",[26,1279,1280],{},"Automatically run backups"," and set:",[19,1283,1284,1290,1298],{},[22,1285,1286,1289],{},[26,1287,1288],{},"Next time:"," today's date and a time after hours (e.g. 02:00)",[22,1291,1292,280,1295],{},[26,1293,1294],{},"Run again every:",[129,1296,1297],{},"1 Days",[22,1299,1300,1303],{},[26,1301,1302],{},"Allowed days:"," all",[10,1305,216,1306,29],{},[26,1307,93],{},[14,1309,1311],{"id":1310},"_8-set-retention-policy","8. Set retention policy",[10,1313,1314,1315,1318,1319,1322],{},"Under ",[26,1316,1317],{},"Backup retention",", pick a policy. 
",[26,1320,1321],{},"Smart backup retention"," is the sensible default — it keeps one backup per day for the last week, one per week for the last month, and one per month for the last year.",[10,1324,1325,1326,1329],{},"For more control, choose ",[26,1327,1328],{},"Custom backup retention"," and enter a policy string like:\n7D:1D,4W:1W,12M:1M",[10,1331,1332],{},"This reads as: keep one version per day for 7 days, one per week for 4 weeks, one per month for 12 months.",[10,1334,1335,1336,1117,1339,1342,1343,29],{},"Set ",[26,1337,1338],{},"Remote volume size",[129,1340,1341],{},"50 MB"," (default) for most connections. Click ",[26,1344,1345],{},"Save",[14,1347,1349],{"id":1348},"_9-run-the-first-backup-and-verify","9. Run the first backup and verify",[10,1351,1352,1353,1356],{},"From the backup job's panel, click ",[26,1354,1355],{},"Run now",". The first backup uploads all selected data and will take time proportional to the dataset size and your upload bandwidth. Subsequent backups only upload changed blocks.",[10,1358,1359,1360,1363],{},"After completion, click ",[26,1361,1362],{},"Verify files"," on the job panel. Duplicati downloads a sample of backup volumes and checks their integrity against the local database. Run this periodically — a backup you haven't verified is a backup you don't have.",[14,1365,1367],{"id":1366},"_10-restore-from-backup","10. Restore from backup",[10,1369,216,1370,1372],{},[26,1371,428],{}," in the left sidebar → select the backup job → choose a restore point (date/time) → tick the files or folders to restore.",[10,1374,1375,1376,1379,1380,1383,1384,1386],{},"Choose a restore destination — ",[26,1377,1378],{},"Original location"," (overwrites existing files) or ",[26,1381,1382],{},"Pick location"," (restores to a new folder). Click ",[26,1385,428],{}," and wait for completion.",[10,1388,1389,1390,90,1392,1395],{},"To restore on a different machine (disaster recovery), install Duplicati, choose ",[26,1391,428],{},[26,1393,1394],{},"Direct restore from backup files",", enter the same S3 destination settings, and provide your encryption passphrase. 
### 5. Configure the S3 destination

On the **Destination** screen, set **Storage Type** to **S3 Compatible**.

Fill in the fields:

| Field | Value |
| --- | --- |
| Server | `Custom server URL` |
| Custom server URL | `storage.hummingtribe.com` |
| Bucket name | your bucket name from the dashboard |
| Bucket create region | leave blank |
| Storage class | `(Default)` |
| Folder path | leave blank (or e.g. `laptop-backup` for a subfolder) |
| AWS Access ID | your Access Key ID |
| AWS Access Key | your Secret Access Key |
| Client library to use | `Amazon AWS SDK` |

Click **Test connection**. Duplicati will verify credentials and confirm the bucket is reachable. If prompted to use path-style URLs, accept — HummingTribe requires path-style access. Click **Next**.

### 6. Select source data

Expand the filesystem tree and tick the folders you want to back up. Typical selections:

- **Windows:** `C:\Users\<name>\Documents`, `Desktop`, `Pictures`
- **macOS:** `/Users/<name>/Documents`, `Desktop`, `Pictures`
- **Linux:** `/home/<user>`

Use the **Filters** tab to exclude caches, virtual machines, or large files you don't need backed up. Click **Next**.

### 7. Set schedule

Enable **Automatically run backups** and set:

- **Next time:** today's date and a time after hours (e.g. 02:00)
- **Run again every:** `1 Days`
- **Allowed days:** all

Click **Next**.

### 8. Set retention policy

Under **Backup retention**, pick a policy. **Smart backup retention** is the sensible default — it keeps one backup per day for the last week, one per week for the last month, and one per month for the last year.

For more control, choose **Custom backup retention** and enter a policy string like `7D:1D,4W:1W,12M:1M`.

This reads as: keep one version per day for 7 days, one per week for 4 weeks, one per month for 12 months.

Set **Remote volume size** to `50 MB` (default) for most connections. Click **Save**.
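The same job can also be expressed through Duplicati's command-line interface, which is useful on headless servers. This is a sketch only: the option names below are assumptions drawn from Duplicati's S3 backend and may differ between versions, so verify them with `duplicati-cli help` before relying on it:

```bash
# Hypothetical one-shot backup; treat every option name as an assumption
# to check against your Duplicati version's documentation.
duplicati-cli backup \
  "s3://my-bucket/laptop-backup?s3-server-name=storage.hummingtribe.com&use-ssl=true" \
  /home/user/Documents \
  --auth-username='YOUR_ACCESS_KEY_ID' \
  --auth-password='YOUR_SECRET_ACCESS_KEY' \
  --passphrase='YOUR_ENCRYPTION_PASSPHRASE' \
  --retention-policy='7D:1D,4W:1W,12M:1M'
```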
### 9. Run the first backup and verify

From the backup job's panel, click **Run now**. The first backup uploads all selected data and will take time proportional to the dataset size and your upload bandwidth. Subsequent backups only upload changed blocks.

After completion, click **Verify files** on the job panel. Duplicati downloads a sample of backup volumes and checks their integrity against the local database. Run this periodically — a backup you haven't verified is a backup you don't have.
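A healthy Duplicati destination contains three kinds of objects: `dlist` (snapshot manifests), `dindex` (lookup indexes), and `dblock` (data volumes). A hedged spot-check with the AWS CLI, using the same placeholder bucket and folder:

```bash
# One dlist per completed backup run; dblock/dindex counts grow with data.
aws s3 ls s3://my-bucket/laptop-backup/ \
  --endpoint-url https://storage.hummingtribe.com | grep -c 'dlist'
```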
Simplest to set up.",[10,1553,1554,1557],{},[26,1555,1556],{},"Trade-off:"," every backup, restore, verification, and GC operation touches S3. Initial backup speeds are bound by your upstream bandwidth. Restores are bound by downstream.",[14,1559,1561],{"id":1560},"pattern-b-local-datastore-s3-datastore-sync-job","Pattern B — Local datastore + S3 datastore + sync job",[10,1563,1564,1565,1568],{},"Backups land on a local datastore first (fast). A scheduled ",[26,1566,1567],{},"sync job on the same PBS instance"," pulls from local to S3 for offsite retention. You get both copies from one PBS host.",[10,1570,1571,1573],{},[26,1572,1550],{}," you want backup speed to match local disk throughput and need an automated offsite copy. This is the recommended pattern for MSPs and production use.",[10,1575,1576,1578],{},[26,1577,1556],{}," more storage required on the PBS host, slightly more complex.",[10,1580,1581],{},"The rest of this guide sets up the S3 endpoint and datastore once. Both patterns diverge only at the final step (whether you point PVE at the S3 datastore directly, or configure a sync job).",[1448,1583,1585],{"id":1584},"step-1-create-the-s3-bucket-and-credentials","Step 1 — Create the S3 bucket and credentials",[10,1587,1588],{},"At your S3 provider, create:",[19,1590,1591,1597,1604],{},[22,1592,1530,1593,1596],{},[26,1594,1595],{},"bucket"," dedicated to PBS. Do not share it with other tools — PBS manages object lifecycle itself.",[22,1598,1599,1600,1603],{},"An ",[26,1601,1602],{},"access key pair"," scoped to that bucket only. Save the secret key; most providers only show it once.",[22,1605,1606,1607,1610,1611,29],{},"Note the ",[26,1608,1609],{},"region"," identifier and the ",[26,1612,1613],{},"endpoint URL",[10,1615,1616],{},"If your provider supports object versioning or object lock, enable it on the bucket for ransomware protection. PBS never modifies existing chunks, but a compromised client with delete permissions could — versioning gives you a recovery window.",[1448,1618,1620],{"id":1619},"step-2-configure-the-s3-endpoint-in-pbs","Step 2 — Configure the S3 endpoint in PBS",[10,1622,1623,1626,1627,1630],{},[26,1624,1625],{},"Via the web UI:"," Navigate to ",[26,1628,1629],{},"Configuration → Remotes → S3 Endpoints → Add",". 
# Proxmox Backup Server offsite to S3 — full setup guide

Proxmox Backup Server 4.0 (August 2025) introduced native S3 object storage as a datastore backend. This replaces the old pattern of mounting S3 with `s3fs-fuse` or running third-party proxies like `pmoxs3backuproxy` — approaches Proxmox never officially supported. This guide covers both supported deployment patterns, with exact commands and the caveats that matter for production use.

**What you'll configure:** An S3 endpoint, an S3-backed datastore with local cache, and — optionally — a sync job that keeps a local datastore and an S3 datastore in step for true 3-2-1 backups.

## Prerequisites

- Proxmox Backup Server 4.0 or newer (4.1.6+ recommended). Check with `proxmox-backup-manager versions`.
- An S3-compatible object store with a pre-created bucket and an access key pair with `GetObject`, `PutObject`, `ListBucket`, and `DeleteObject` permissions on that bucket. PBS does not create buckets or manage ACLs.
- A dedicated disk, partition, or ZFS dataset for the local cache. Proxmox recommends 64–128 GiB. Do not reuse an existing PBS datastore path.
- Outbound HTTPS to the S3 endpoint. Plain HTTP is rejected. Self-signed certificates require the TLS fingerprint in config.
- Stable bandwidth to the S3 endpoint. Initial seeding writes a large chunk of your datastore to S3.
Get the fingerprint with:",[1003,1733,1735],{"className":1005,"code":1734,"language":1007,"meta":472,"style":472},"openssl s_client -connect your-s3-endpoint:443 -servername your-s3-endpoint \u003C /dev/null 2>/dev/null | \\\n  openssl x509 -fingerprint -sha256 -noout\n",[129,1736,1737,1775],{"__ignoreMap":472},[1011,1738,1739,1742,1745,1748,1751,1754,1757,1761,1764,1767,1770,1773],{"class":1013,"line":1014},[1011,1740,1741],{"class":1017},"openssl",[1011,1743,1744],{"class":1021}," s_client",[1011,1746,1747],{"class":1031}," -connect",[1011,1749,1750],{"class":1021}," your-s3-endpoint:443",[1011,1752,1753],{"class":1031}," -servername",[1011,1755,1756],{"class":1021}," your-s3-endpoint",[1011,1758,1760],{"class":1759},"szBVR"," \u003C",[1011,1762,1763],{"class":1021}," /dev/null",[1011,1765,1766],{"class":1759}," 2>",[1011,1768,1769],{"class":1021},"/dev/null",[1011,1771,1772],{"class":1759}," |",[1011,1774,1661],{"class":1031},[1011,1776,1777,1780,1783,1786,1789],{"class":1013,"line":473},[1011,1778,1779],{"class":1017},"  openssl",[1011,1781,1782],{"class":1021}," x509",[1011,1784,1785],{"class":1031}," -fingerprint",[1011,1787,1788],{"class":1031}," -sha256",[1011,1790,1791],{"class":1031}," -noout\n",[10,1793,1794],{},"Verify the endpoint:",[1003,1796,1798],{"className":1005,"code":1797,"language":1007,"meta":472,"style":472},"proxmox-backup-manager s3 endpoint list\n",[129,1799,1800],{"__ignoreMap":472},[1011,1801,1802,1804,1806,1808],{"class":1013,"line":1014},[1011,1803,1646],{"class":1017},[1011,1805,1649],{"class":1021},[1011,1807,1652],{"class":1021},[1011,1809,1810],{"class":1021}," list\n",[1448,1812,1814],{"id":1813},"step-3-prepare-the-local-cache","Step 3 — Prepare the local cache",[10,1816,1817],{},"The cache must live on a dedicated path. A ZFS dataset with a quota is the cleanest option:",[1003,1819,1821],{"className":1005,"code":1820,"language":1007,"meta":472,"style":472},"zfs create -o mountpoint=/mnt/datastore/s3-cache rpool/s3-cache\nzfs set quota=128G rpool/s3-cache\n",[129,1822,1823,1839],{"__ignoreMap":472},[1011,1824,1825,1828,1830,1833,1836],{"class":1013,"line":1014},[1011,1826,1827],{"class":1017},"zfs",[1011,1829,1655],{"class":1021},[1011,1831,1832],{"class":1031}," -o",[1011,1834,1835],{"class":1021}," mountpoint=/mnt/datastore/s3-cache",[1011,1837,1838],{"class":1021}," rpool/s3-cache\n",[1011,1840,1841,1843,1846,1849],{"class":1013,"line":473},[1011,1842,1827],{"class":1017},[1011,1844,1845],{"class":1021}," set",[1011,1847,1848],{"class":1021}," quota=128G",[1011,1850,1838],{"class":1021},[10,1852,1853,1854,1857],{},"Or use a dedicated partition mounted at ",[129,1855,1856],{},"/mnt/datastore/s3-cache",". 
## Two deployment patterns

### Pattern A — S3 as the only datastore

PBS writes directly to the S3 datastore. No local chunk storage beyond the cache.

**Use when:** homelab, small deployments, or a secondary PBS acting purely as an offsite target. Simplest to set up.

**Trade-off:** every backup, restore, verification, and GC operation touches S3. Initial backup speeds are bound by your upstream bandwidth. Restores are bound by downstream.

### Pattern B — Local datastore + S3 datastore + sync job

Backups land on a local datastore first (fast). A scheduled **sync job on the same PBS instance** pulls from local to S3 for offsite retention. You get both copies from one PBS host.

**Use when:** you want backup speed to match local disk throughput and need an automated offsite copy. This is the recommended pattern for MSPs and production use.

**Trade-off:** more storage required on the PBS host, slightly more complex.

The rest of this guide sets up the S3 endpoint and datastore once. Both patterns diverge only at the final step (whether you point PVE at the S3 datastore directly, or configure a sync job).

## Step 1 — Create the S3 bucket and credentials

At your S3 provider, create:

- A **bucket** dedicated to PBS. Do not share it with other tools — PBS manages object lifecycle itself.
- An **access key pair** scoped to that bucket only. Save the secret key; most providers only show it once.
- Note the **region** identifier and the **endpoint URL**.

If your provider supports object versioning or object lock, enable it on the bucket for ransomware protection. PBS never modifies existing chunks, but a compromised client with delete permissions could — versioning gives you a recovery window.
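The minimum policy, sketched in AWS IAM syntax; the action and resource split below matches the permissions listed in the prerequisites, but the JSON shape is an assumption about your provider's policy language, so adapt as needed:

```bash
# Sketch of a minimal bucket-scoped policy, written out as a file you can
# feed to your provider's IAM tooling. 'pbs-offsite-bucket' is a placeholder.
cat > pbs-s3-policy.json <<'EOF'
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": ["s3:ListBucket"],
      "Resource": "arn:aws:s3:::pbs-offsite-bucket"
    },
    {
      "Effect": "Allow",
      "Action": ["s3:GetObject", "s3:PutObject", "s3:DeleteObject"],
      "Resource": "arn:aws:s3:::pbs-offsite-bucket/*"
    }
  ]
}
EOF
```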
## Step 2 — Configure the S3 endpoint in PBS

**Via the web UI:** Navigate to **Configuration → Remotes → S3 Endpoints → Add**. Fill in name, access key, secret, endpoint URL, region, and (for self-signed providers) fingerprint.

**Via CLI** — using the endpoint template that most providers follow:

```bash
proxmox-backup-manager s3 endpoint create my-s3-ep \
  --access-key 'YOUR_ACCESS_KEY' \
  --secret-key 'YOUR_SECRET_KEY' \
  --endpoint '{{bucket}}.s3.{{region}}.example.com' \
  --region eu-central-1
```

The `{{bucket}}` and `{{region}}` placeholders are expanded automatically when PBS makes requests. This gives you one endpoint config that works across multiple buckets.

**Vhost vs path style:** PBS defaults to vhost-style addressing (bucket as subdomain). If your provider requires path-style (bucket in the URL path), add `--path-style true`. Cloudflare R2 and some self-hosted providers need this.

**Self-signed certificates:** Add `--fingerprint 'XX:XX:XX:...'`. Get the fingerprint with:

```bash
openssl s_client -connect your-s3-endpoint:443 -servername your-s3-endpoint < /dev/null 2>/dev/null | \
  openssl x509 -fingerprint -sha256 -noout
```

Verify the endpoint:

```bash
proxmox-backup-manager s3 endpoint list
```

## Step 3 — Prepare the local cache

The cache must live on a dedicated path. A ZFS dataset with a quota is the cleanest option:

```bash
zfs create -o mountpoint=/mnt/datastore/s3-cache rpool/s3-cache
zfs set quota=128G rpool/s3-cache
```

Or use a dedicated partition mounted at `/mnt/datastore/s3-cache`. Whatever you choose, do not use an existing datastore path — PBS will reject it.
## Step 4 — Create the S3-backed datastore

**Via the web UI:** **Datastore → Add Datastore**, select **S3** as backend, pick your endpoint from the dropdown, set bucket name and cache path.

**Via CLI:**

```bash
proxmox-backup-manager datastore create s3-offsite \
  /mnt/datastore/s3-cache \
  --backend type=s3,client=my-s3-ep,bucket=pbs-offsite-bucket
```

- `s3-offsite` is the datastore name. It becomes the prefix for all objects in the bucket, so pick something stable — renaming later means reseeding.
- `/mnt/datastore/s3-cache` is the local cache path.
- `client=my-s3-ep` references the endpoint you created in Step 2.
- `bucket=pbs-offsite-bucket` is the S3 bucket name.

List datastores to confirm:

```bash
proxmox-backup-manager datastore list
```

The datastore is now usable. At this point your config diverges depending on pattern.

## Pattern A — Use S3 datastore directly from PVE

In Proxmox VE: **Datacenter → Storage → Add → Proxmox Backup Server**. Enter your PBS IP/hostname, credentials, the datastore name (`s3-offsite`), and the PBS server fingerprint.

Get the PBS fingerprint:

```bash
proxmox-backup-manager cert info | grep Fingerprint
```

Backup jobs in PVE that target this storage now write directly to S3 via the PBS local cache.
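From the PVE side you can confirm the storage is wired up before running the first backup. A quick check, assuming the storage was added under the placeholder name `pbs-s3`:

```bash
# 'active' status confirms PVE can reach PBS and the datastore.
pvesm status --storage pbs-s3

# List backups PVE can see on that storage (empty until the first run).
pvesm list pbs-s3
```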
datastore.",[22,2215,2216,2219],{},[129,2217,2218],{},"--store s3-offsite"," is the target (S3) datastore.",[22,2221,2222,2225,2226,2229],{},[129,2223,2224],{},"--remove-vanished false"," is a ",[26,2227,2228],{},"ransomware safety measure",": if an attacker deletes snapshots on your local datastore, the sync job won't propagate those deletions to S3. Manage retention directly on the S3 datastore with a separate prune job.",[10,2231,2232,2233,2236],{},"You can also configure all of this from the web UI under ",[26,2234,2235],{},"Datastore → s3-offsite → Sync Jobs → Add"," after the self-remote is in place — often the faster path for first-time setup.",[1448,2238,2240],{"id":2239},"step-5-encryption-do-not-skip","Step 5 — Encryption (do not skip)",[10,2242,2243],{},"PBS supports client-side encryption. Chunks are encrypted on the PVE host before being sent to PBS — your S3 provider only ever sees ciphertext. This is independent of the bucket being public-accessible or not; correctly configured, a compromised bucket leaks nothing useful.",[10,2245,2246],{},"On each PVE node:",[1003,2248,2250],{"className":1005,"code":2249,"language":1007,"meta":472,"style":472},"proxmox-backup-client key create /etc/pve/priv/pbs-encryption.key\n",[129,2251,2252],{"__ignoreMap":472},[1011,2253,2254,2257,2260,2262],{"class":1013,"line":1014},[1011,2255,2256],{"class":1017},"proxmox-backup-client",[1011,2258,2259],{"class":1021}," key",[1011,2261,1655],{"class":1021},[1011,2263,2264],{"class":1021}," /etc/pve/priv/pbs-encryption.key\n",[10,2266,2267,2268,2271],{},"Reference the key in your storage config in PVE (",[26,2269,2270],{},"Datacenter → Storage → your PBS storage → Encryption Key","). All subsequent backups are encrypted.",[1491,2273,2274],{},[10,2275,1495,2276,2279],{},[26,2277,2278],{},"Back up the encryption key separately — not on PBS, not in the S3 bucket it protects."," If you lose the key, every backup in S3 is unrecoverable. Print the paper-key version and store it in a safe, or keep it in a password manager that is not itself backed up to the same PBS.",[10,2281,2282,2283,2286],{},"Use the master-key feature (",[129,2284,2285],{},"--master-pubkey-file",") to allow recovery of individual backup keys from a master keypair. The Proxmox Backup Client documentation covers the master-key workflow in detail.",[1448,2288,2290],{"id":2289},"step-6-garbage-collection-and-verification-on-s3","Step 6 — Garbage collection and verification on S3",[10,2292,2293,2294,2297],{},"GC on an S3-backed datastore issues significantly more API requests than GC on local storage. Schedule it ",[26,2295,2296],{},"less frequently"," than you would locally — weekly is reasonable for most workloads, not daily.",[1003,2299,2301],{"className":1005,"code":2300,"language":1007,"meta":472,"style":472},"proxmox-backup-manager datastore update s3-offsite --gc-schedule 'Sun 04:00'\n",[129,2302,2303],{"__ignoreMap":472},[1011,2304,2305,2307,2309,2312,2314,2317],{"class":1013,"line":1014},[1011,2306,1646],{"class":1017},[1011,2308,1890],{"class":1021},[1011,2310,2311],{"class":1021}," update",[1011,2313,1895],{"class":1021},[1011,2315,2316],{"class":1031}," --gc-schedule",[1011,2318,2319],{"class":1021}," 'Sun 04:00'\n",[10,2321,2322,2323,2326],{},"Verification jobs read chunks back and recompute their hashes. On S3 this means downloading chunks — egress cost applies unless your provider offers zero-egress. 
## Step 5 — Encryption (do not skip)

PBS supports client-side encryption. Chunks are encrypted on the PVE host before being sent to PBS — your S3 provider only ever sees ciphertext. This is independent of whether the bucket is publicly accessible; correctly configured, a compromised bucket leaks nothing useful.

On each PVE node:

```bash
proxmox-backup-client key create /etc/pve/priv/pbs-encryption.key
```

Reference the key in your storage config in PVE (**Datacenter → Storage → your PBS storage → Encryption Key**). All subsequent backups are encrypted.

> ⚠️ **Back up the encryption key separately — not on PBS, not in the S3 bucket it protects.** If you lose the key, every backup in S3 is unrecoverable. Print the paper-key version and store it in a safe, or keep it in a password manager that is not itself backed up to the same PBS.

Use the master-key feature (`--master-pubkey-file`) to allow recovery of individual backup keys from a master keypair. The Proxmox Backup Client documentation covers the master-key workflow in detail.
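The paper-key version mentioned above can be generated directly by the client. A short sketch (the output path is arbitrary; print the file and store it offline):

```bash
# Render the key as a printable representation for offline storage.
proxmox-backup-client key paperkey /etc/pve/priv/pbs-encryption.key \
  --output-format text > pbs-paperkey.txt
```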
'storage.hummingtribe.com'",[1011,2482,1661],{"class":1031},[1011,2484,2485,2487,2490],{"class":1013,"line":1695},[1011,2486,1698],{"class":1031},[1011,2488,2489],{"class":1021}," 'YOUR_REGION'",[1011,2491,1661],{"class":1031},[1011,2493,2494,2497],{"class":1013,"line":2193},[1011,2495,2496],{"class":1031},"  --path-style",[1011,2498,2349],{"class":1031},[10,2500,2501],{},"Then create the datastore against your HummingTribe bucket:",[1003,2503,2505],{"className":1005,"code":2504,"language":1007,"meta":472,"style":472},"proxmox-backup-manager datastore create ht-s3-offsite \\\n  /mnt/datastore/ht-s3-cache \\\n  --backend type=s3,client=hummingtribe,bucket=your-bucket-name\n",[129,2506,2507,2520,2527],{"__ignoreMap":472},[1011,2508,2509,2511,2513,2515,2518],{"class":1013,"line":1014},[1011,2510,1646],{"class":1017},[1011,2512,1890],{"class":1021},[1011,2514,1655],{"class":1021},[1011,2516,2517],{"class":1021}," ht-s3-offsite",[1011,2519,1661],{"class":1031},[1011,2521,2522,2525],{"class":1013,"line":473},[1011,2523,2524],{"class":1021},"  /mnt/datastore/ht-s3-cache",[1011,2526,1661],{"class":1031},[1011,2528,2529,2531],{"class":1013,"line":476},[1011,2530,1909],{"class":1031},[1011,2532,2533],{"class":1021}," type=s3,client=hummingtribe,bucket=your-bucket-name\n",[10,2535,2536,2539],{},[26,2537,2538],{},"Why this is a fit for PBS offsite:"," zero egress means restore and verification operations don't incur surprise costs. EU-only data residency satisfies GDPR without a DPA negotiation. Flat monthly pricing removes the API-request cost variable that hurts cloud object storage PBS deployments on hyperscaler clouds.",[1448,2541,2543],{"id":2542},"troubleshooting","Troubleshooting",[10,2545,2546,2552,2553,2556],{},[26,2547,2548,2551],{},[129,2549,2550],{},"certificate verify failed"," on endpoint test."," Self-signed or private CA cert. Add ",[129,2554,2555],{},"--fingerprint"," to the endpoint config with the SHA-256 fingerprint.",[10,2558,2559,2565,2566,1236,2569,2572,2573,2576,2577,800,2580,2583],{},[26,2560,2561,2564],{},[129,2562,2563],{},"Access Denied"," on datastore creation."," Access key missing ",[129,2567,2568],{},"s3:PutObject",[129,2570,2571],{},"s3:DeleteObject",", or ",[129,2574,2575],{},"s3:ListBucket"," on the bucket. On AWS IAM, the policy needs both ",[129,2578,2579],{},"arn:aws:s3:::bucket-name",[129,2581,2582],{},"arn:aws:s3:::bucket-name/*"," resources.",[10,2585,2586,2589,2590,2593],{},[26,2587,2588],{},"Region errors on Cloudflare R2 or similar."," Set ",[129,2591,2592],{},"--region auto"," — R2 does not validate the region name but requires a non-empty value.",[10,2595,2596,2599],{},[26,2597,2598],{},"Datastore creation fails with \"path already a datastore\"."," Pick a cache path that is not already a PBS datastore. The cache cannot be nested inside another datastore directory.",[10,2601,2602,2605,2606,2609,2610,800,2613,2616,2617,2620],{},[26,2603,2604],{},"Migrating to a new PBS host."," On the new host, recreate the S3 endpoint config identically, then create the datastore with the ",[26,2607,2608],{},"same datastore name"," and both ",[129,2611,2612],{},"--reuse-datastore true",[129,2614,2615],{},"--overwrite-in-use true",". Never run two PBS instances against the same S3 datastore simultaneously — use the ",[129,2618,2619],{},"overwrite-in-use"," flag only when the original host is retired.",[10,2622,2623,2626,2627,2630,2631,2634],{},[26,2624,2625],{},"Running out of space on S3 mid-write."," Cleanup operations may fail alongside. 
## Troubleshooting

**`certificate verify failed` on endpoint test.** Self-signed or private CA cert. Add `--fingerprint` to the endpoint config with the SHA-256 fingerprint.

**`Access Denied` on datastore creation.** Access key missing `s3:PutObject`, `s3:DeleteObject`, or `s3:ListBucket` on the bucket. On AWS IAM, the policy needs both `arn:aws:s3:::bucket-name` and `arn:aws:s3:::bucket-name/*` resources.

**Region errors on Cloudflare R2 or similar.** Set `--region auto` — R2 does not validate the region name but requires a non-empty value.

**Datastore creation fails with "path already a datastore".** Pick a cache path that is not already a PBS datastore. The cache cannot be nested inside another datastore directory.

**Migrating to a new PBS host.** On the new host, recreate the S3 endpoint config identically, then create the datastore with the **same datastore name** and both `--reuse-datastore true` and `--overwrite-in-use true`. Never run two PBS instances against the same S3 datastore simultaneously — use the `overwrite-in-use` flag only when the original host is retired.

**Running out of space on S3 mid-write.** Cleanup of the partial snapshot may fail as well. Manually remove stray objects for the affected snapshot in the S3 console, then run an **S3 refresh** on the datastore (UI: **Datastore → Refresh from S3**, or via CLI).

## What to do next

If you're evaluating providers for PBS offsite, the three variables that matter are: **data residency** (EU if you need GDPR), **egress pricing** (zero-egress beats per-GB charges for any verification workload), and **API request pricing** (matters for GC frequency). [HummingTribe S3](/s3#pricing) addresses all three, with flat per-TB pricing and no egress charges, hosted in Germany.