-rw-r--r--  .gitignore   101
-rwxr-xr-x  squashr.rs  1090
2 files changed, 1191 insertions(+), 0 deletions(-)
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..4efb7e9
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,101 @@
+backup
+test
+# Created by https://www.toptal.com/developers/gitignore/api/linux,windows,macos,vim
+# Edit at https://www.toptal.com/developers/gitignore?templates=linux,windows,macos,vim
+
+### Linux ###
+*~
+
+# temporary files which can be created if a process still has a handle open of a deleted file
+.fuse_hidden*
+
+# KDE directory preferences
+.directory
+
+# Linux trash folder which might appear on any partition or disk
+.Trash-*
+
+# .nfs files are created when an open file is removed but is still being accessed
+.nfs*
+
+### macOS ###
+# General
+.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+### macOS Patch ###
+# iCloud generated files
+*.icloud
+
+### Vim ###
+# Swap
+[._]*.s[a-v][a-z]
+!*.svg # comment out if you don't need vector files
+[._]*.sw[a-p]
+[._]s[a-rt-v][a-z]
+[._]ss[a-gi-z]
+[._]sw[a-p]
+
+# Session
+Session.vim
+Sessionx.vim
+
+# Temporary
+.netrwhist
+# Auto-generated tag files
+tags
+# Persistent undo
+[._]*.un~
+
+### Windows ###
+# Windows thumbnail cache files
+Thumbs.db
+Thumbs.db:encryptable
+ehthumbs.db
+ehthumbs_vista.db
+
+# Dump file
+*.stackdump
+
+# Folder config file
+[Dd]esktop.ini
+
+# Recycle Bin used on file shares
+$RECYCLE.BIN/
+
+# Windows Installer files
+*.cab
+*.msi
+*.msix
+*.msm
+*.msp
+
+# Windows shortcuts
+*.lnk
+
+# End of https://www.toptal.com/developers/gitignore/api/linux,windows,macos,vim
+
diff --git a/squashr.rs b/squashr.rs
new file mode 100755
index 0000000..2c77051
--- /dev/null
+++ b/squashr.rs
@@ -0,0 +1,1090 @@
+#!/usr/bin/env -S rust-script
+//! ```cargo
+//! [package]
+//! name = "squashr"
+//! version = "0.5.1"
+//! edition = "2021"
+//!
+//! [dependencies]
+//! anyhow = "1"
+//! clap = { version = "4", features = ["derive"] }
+//! serde = { version = "1", features = ["derive"] }
+//! itertools = "0.12"
+//! regex = "1"
+//! walkdir = "2"
+//! chrono = { version = "0.4", default-features = false, features = ["clock"] }
+//! rusqlite = { version = "0.31", features = ["bundled"] }
+//! libc = "0.2"
+//! ```
+
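+//! squashr keeps incremental backups as a stack of SquashFS layers: each
+//! `backup` run writes only the files changed since the last run into a new
+//! image, `mount` stacks the layers with OverlayFS, and `minimize` (or the
+//! automatic rotation) merges the oldest layers. Each layer can optionally be
+//! wrapped in a LUKS container.
+//!
+//! Example invocations (paths are illustrative):
+//!   SQUASHR_ROOT=/srv/squashr SQUASHR_INCLUDE=/etc,/home ./squashr.rs backup
+//!   ./squashr.rs mount -s 3 /mnt/restore
+//!   ./squashr.rs umount /mnt/restore
+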
+use anyhow::{anyhow, bail, Context, Result};
+use clap::{Parser, Subcommand, ValueHint};
+use itertools::Itertools;
+use regex::Regex;
+use rusqlite::{params, Connection};
+use serde::Deserialize;
+use std::collections::{BTreeSet, HashMap, HashSet};
+use std::env;
+use std::fs;
+use std::io::{ErrorKind, Write};
+use std::os::unix::fs::MetadataExt;
+use std::path::{Path, PathBuf};
+use std::process::{Command, Stdio};
+use walkdir::WalkDir;
+
+#[derive(Parser, Debug)]
+#[command(name="squashr", version, about="Manage backups based on SquashFS, OverlayFS and LUKS")]
+struct Cli {
+ #[arg(short, long, value_hint=ValueHint::FilePath)]
+ config: Option<PathBuf>,
+
+ #[arg(long, value_hint=ValueHint::DirPath)]
+ squashr_root: Option<PathBuf>,
+
+ #[arg(long)]
+ squashr_n_snapshots_min: Option<usize>,
+
+ #[arg(long)]
+ squashr_n_snapshots_max: Option<usize>,
+
+ #[arg(long)]
+ squashr_include: Option<String>,
+
+ #[arg(long)]
+ squashr_exclude: Option<String>,
+
+ #[arg(long)]
+ squashr_truncate: Option<String>,
+
+ #[arg(long)]
+ squashr_compression_enable: Option<bool>,
+
+ #[arg(long)]
+ squashr_compression_algo: Option<String>,
+
+ #[arg(long, allow_hyphen_values = true)]
+ squashr_compression_args: Option<String>,
+
+ #[arg(long)]
+ squashr_cryptsetup_enable: Option<bool>,
+
+ #[arg(long, allow_hyphen_values = true)]
+ squashr_cryptsetup_create_args: Option<String>,
+
+ #[arg(long, allow_hyphen_values = true)]
+ squashr_cryptsetup_open_args: Option<String>,
+
+ #[command(subcommand)]
+ cmd: Cmd,
+}
+
+#[derive(Subcommand, Debug)]
+enum Cmd {
+ Backup,
+ Minimize { #[arg(short = 'n')] n: Option<usize> },
+ New,
+ Mount { #[arg(short = 's')] s: Option<usize>, target: PathBuf },
+ Umount { target: PathBuf },
+ Delete { #[arg(short = 's')] s: usize },
+}
+
+#[derive(Debug, Clone, Deserialize)]
+struct Config {
+ SQUASHR_ROOT: Option<PathBuf>,
+ SQUASHR_N_SNAPSHOTS_MIN: Option<usize>,
+ SQUASHR_N_SNAPSHOTS_MAX: Option<usize>,
+ SQUASHR_INCLUDE: Option<Vec<String>>,
+ SQUASHR_EXCLUDE: Option<Vec<String>>,
+ SQUASHR_TRUNCATE: Option<Vec<String>>,
+ SQUASHR_COMPRESSION_ENABLE: Option<bool>,
+ SQUASHR_COMPRESSION_ALGO: Option<String>,
+ SQUASHR_COMPRESSION_ARGS: Option<String>,
+ SQUASHR_CRYPTSETUP_ENABLE: Option<bool>,
+ SQUASHR_CRYPTSETUP_CREATE_ARGS: Option<String>,
+ SQUASHR_CRYPTSETUP_OPEN_ARGS: Option<String>,
+}
+
+impl Default for Config {
+ fn default() -> Self {
+ Self {
+ SQUASHR_ROOT: None,
+ SQUASHR_N_SNAPSHOTS_MIN: Some(5),
+ SQUASHR_N_SNAPSHOTS_MAX: Some(30),
+ SQUASHR_INCLUDE: None,
+ SQUASHR_EXCLUDE: None,
+ SQUASHR_TRUNCATE: None,
+ SQUASHR_COMPRESSION_ENABLE: Some(true),
+ SQUASHR_COMPRESSION_ALGO: Some("zstd".to_string()),
+ SQUASHR_COMPRESSION_ARGS: Some("-Xcompression-level 19".to_string()),
+ SQUASHR_CRYPTSETUP_ENABLE: Some(false),
+ SQUASHR_CRYPTSETUP_CREATE_ARGS: Some("--type luks2".to_string()),
+ SQUASHR_CRYPTSETUP_OPEN_ARGS: Some("--type luks".to_string()),
+ }
+ }
+}
+
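+/// Splits a config/env list value on newlines and commas, trimming
+/// whitespace and dropping empty entries.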
+fn parse_list(s: &str) -> Vec<String> {
+ let mut out: Vec<String> = vec![];
+ for line in s.lines() {
+ let t = line.trim();
+ if !t.is_empty() { out.push(t.to_string()); }
+ }
+ out = out.into_iter()
+ .flat_map(|x| x.split(',').map(|y| y.trim().to_string()).collect::<Vec<_>>())
+ .collect();
+ out.into_iter().filter(|x| !x.is_empty()).collect()
+}
+
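+/// Loads the configuration: starts from the built-in defaults and overlays
+/// `KEY=VALUE` pairs from the optional config file (blank lines and `#`
+/// comments are ignored; unknown keys are skipped).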
+fn load_config(path: Option<&Path>) -> Result<Config> {
+ let mut cfg = Config::default();
+ if let Some(p) = path {
+ if p.exists() {
+ let text = fs::read_to_string(p).with_context(|| format!("Reading config: {}", p.display()))?;
+ let mut map: HashMap<String, String> = HashMap::new();
+ for line in text.lines() {
+ let l = line.trim();
+ if l.is_empty() || l.starts_with('#') { continue; }
+ if let Some((k,v)) = l.split_once('=') {
+ map.insert(k.trim().to_string(), v.trim().to_string());
+ }
+ }
+ let assign = |k:&str, f: &mut dyn FnMut(String)| {
+ if let Some(v)=map.get(k).cloned(){ f(v) }
+ };
+ let set = |k:&str, f:&mut dyn FnMut(String)| assign(k, f);
+ set("SQUASHR_ROOT", &mut |v| cfg.SQUASHR_ROOT = Some(PathBuf::from(v)));
+ set("SQUASHR_N_SNAPSHOTS_MIN", &mut |v| cfg.SQUASHR_N_SNAPSHOTS_MIN = v.parse().ok());
+ set("SQUASHR_N_SNAPSHOTS_MAX", &mut |v| cfg.SQUASHR_N_SNAPSHOTS_MAX = v.parse().ok());
+ set("SQUASHR_INCLUDE", &mut |v| cfg.SQUASHR_INCLUDE = Some(parse_list(&v)));
+ set("SQUASHR_EXCLUDE", &mut |v| cfg.SQUASHR_EXCLUDE = Some(parse_list(&v)));
+ set("SQUASHR_TRUNCATE", &mut |v| cfg.SQUASHR_TRUNCATE = Some(parse_list(&v)));
+ set("SQUASHR_COMPRESSION_ENABLE", &mut |v| cfg.SQUASHR_COMPRESSION_ENABLE = v.parse().ok());
+ set("SQUASHR_COMPRESSION_ALGO", &mut |v| cfg.SQUASHR_COMPRESSION_ALGO = Some(v));
+ set("SQUASHR_COMPRESSION_ARGS", &mut |v| cfg.SQUASHR_COMPRESSION_ARGS = Some(v));
+ set("SQUASHR_CRYPTSETUP_ENABLE", &mut |v| cfg.SQUASHR_CRYPTSETUP_ENABLE = v.parse().ok());
+ set("SQUASHR_CRYPTSETUP_CREATE_ARGS", &mut |v| cfg.SQUASHR_CRYPTSETUP_CREATE_ARGS = Some(v));
+ set("SQUASHR_CRYPTSETUP_OPEN_ARGS", &mut |v| cfg.SQUASHR_CRYPTSETUP_OPEN_ARGS = Some(v));
+ }
+ }
+ Ok(cfg)
+}
+
+fn bool_env(name:&str)->Option<bool>{ env::var(name).ok()?.parse().ok() }
+fn str_env(name:&str)->Option<String>{ env::var(name).ok() }
+fn list_env(name:&str)->Option<Vec<String>>{ env::var(name).ok().map(|v| parse_list(&v)) }
+
+fn apply_env_overrides(mut cfg: Config)->Result<Config>{
+ if let Some(v)=str_env("SQUASHR_ROOT"){ cfg.SQUASHR_ROOT=Some(PathBuf::from(v)); }
+ if let Some(v)=env::var("SQUASHR_N_SNAPSHOTS_MIN").ok().and_then(|x| x.parse().ok()){ cfg.SQUASHR_N_SNAPSHOTS_MIN=Some(v); }
+ if let Some(v)=env::var("SQUASHR_N_SNAPSHOTS_MAX").ok().and_then(|x| x.parse().ok()){ cfg.SQUASHR_N_SNAPSHOTS_MAX=Some(v); }
+ if let Some(v)=list_env("SQUASHR_INCLUDE"){ cfg.SQUASHR_INCLUDE=Some(v); }
+ if let Some(v)=list_env("SQUASHR_EXCLUDE"){ cfg.SQUASHR_EXCLUDE=Some(v); }
+ if let Some(v)=list_env("SQUASHR_TRUNCATE"){ cfg.SQUASHR_TRUNCATE=Some(v); }
+ if let Some(v)=bool_env("SQUASHR_COMPRESSION_ENABLE"){ cfg.SQUASHR_COMPRESSION_ENABLE=Some(v); }
+ if let Some(v)=str_env("SQUASHR_COMPRESSION_ALGO"){ cfg.SQUASHR_COMPRESSION_ALGO=Some(v); }
+ if let Some(v)=str_env("SQUASHR_COMPRESSION_ARGS"){ cfg.SQUASHR_COMPRESSION_ARGS=Some(v); }
+ if let Some(v)=bool_env("SQUASHR_CRYPTSETUP_ENABLE"){ cfg.SQUASHR_CRYPTSETUP_ENABLE=Some(v); }
+ if let Some(v)=str_env("SQUASHR_CRYPTSETUP_CREATE_ARGS"){ cfg.SQUASHR_CRYPTSETUP_CREATE_ARGS=Some(v); }
+ if let Some(v)=str_env("SQUASHR_CRYPTSETUP_OPEN_ARGS"){ cfg.SQUASHR_CRYPTSETUP_OPEN_ARGS=Some(v); }
+ Ok(cfg)
+}
+
+fn apply_cli_overrides(mut cfg: Config, cli:&Cli)->Result<Config>{
+ if let Some(v)=cli.squashr_root.clone(){ cfg.SQUASHR_ROOT=Some(v); }
+ if let Some(v)=cli.squashr_n_snapshots_min{ cfg.SQUASHR_N_SNAPSHOTS_MIN=Some(v); }
+ if let Some(v)=cli.squashr_n_snapshots_max{ cfg.SQUASHR_N_SNAPSHOTS_MAX=Some(v); }
+ if let Some(v)=cli.squashr_include.as_ref(){ cfg.SQUASHR_INCLUDE=Some(parse_list(v)); }
+ if let Some(v)=cli.squashr_exclude.as_ref(){ cfg.SQUASHR_EXCLUDE=Some(parse_list(v)); }
+ if let Some(v)=cli.squashr_truncate.as_ref(){ cfg.SQUASHR_TRUNCATE=Some(parse_list(v)); }
+ if let Some(v)=cli.squashr_compression_enable{ cfg.SQUASHR_COMPRESSION_ENABLE=Some(v); }
+ if let Some(v)=cli.squashr_compression_algo.clone(){ cfg.SQUASHR_COMPRESSION_ALGO=Some(v); }
+ if let Some(v)=cli.squashr_compression_args.clone(){ cfg.SQUASHR_COMPRESSION_ARGS=Some(v); }
+ if let Some(v)=cli.squashr_cryptsetup_enable{ cfg.SQUASHR_CRYPTSETUP_ENABLE=Some(v); }
+ if let Some(v)=cli.squashr_cryptsetup_create_args.clone(){ cfg.SQUASHR_CRYPTSETUP_CREATE_ARGS=Some(v); }
+ if let Some(v)=cli.squashr_cryptsetup_open_args.clone(){ cfg.SQUASHR_CRYPTSETUP_OPEN_ARGS=Some(v); }
+ Ok(cfg)
+}
+
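+/// Resolved runtime context: the directory layout under SQUASHR_ROOT
+/// (state/, mounts/, work/, the SQLite index) plus the effective settings
+/// after merging defaults, config file, environment and CLI flags.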
+#[derive(Clone)]
+struct Ctx {
+ state_dir: PathBuf,
+ mounts_dir: PathBuf,
+ work_dir: PathBuf,
+ index_db: PathBuf,
+ min_keep: usize,
+ max_keep: Option<usize>,
+ include: Vec<PathBuf>,
+ exclude: Vec<PathBuf>,
+ truncate: Vec<PathBuf>,
+ comp_enable: bool,
+ comp_algo: String,
+ comp_args: Option<String>,
+ luks_enable: bool,
+ luks_create_args: Option<String>,
+ luks_open_args: Option<String>,
+}
+
+impl Ctx {
+ fn new(cfg: Config) -> Result<Self> {
+ let root = cfg.SQUASHR_ROOT.ok_or_else(|| anyhow!("SQUASHR_ROOT not set!"))?;
+ let state_dir = root.join("state");
+ let mounts_dir = root.join("mounts");
+ let work_dir = root.join("work");
+ fs::create_dir_all(&state_dir)?;
+ fs::create_dir_all(&mounts_dir)?;
+ fs::create_dir_all(&work_dir)?;
+ let index_db = state_dir.join("index.sqlite");
+ let max_raw = cfg.SQUASHR_N_SNAPSHOTS_MAX.unwrap_or(0);
+ let max_keep = if max_raw == 0 { None } else { Some(max_raw) };
+ Ok(Self {
+ state_dir,
+ mounts_dir,
+ work_dir,
+ index_db,
+ min_keep: cfg.SQUASHR_N_SNAPSHOTS_MIN.unwrap_or(5),
+ max_keep,
+ include: cfg.SQUASHR_INCLUDE.unwrap_or_default().into_iter().map(PathBuf::from).collect(),
+ exclude: cfg.SQUASHR_EXCLUDE.unwrap_or_default().into_iter().map(PathBuf::from).collect(),
+ truncate: cfg.SQUASHR_TRUNCATE.unwrap_or_default().into_iter().map(PathBuf::from).collect(),
+ comp_enable: cfg.SQUASHR_COMPRESSION_ENABLE.unwrap_or(true),
+ comp_algo: cfg.SQUASHR_COMPRESSION_ALGO.unwrap_or_else(|| "zstd".into()),
+ comp_args: cfg.SQUASHR_COMPRESSION_ARGS,
+ luks_enable: cfg.SQUASHR_CRYPTSETUP_ENABLE.unwrap_or(false),
+ luks_create_args: cfg.SQUASHR_CRYPTSETUP_CREATE_ARGS,
+ luks_open_args: cfg.SQUASHR_CRYPTSETUP_OPEN_ARGS,
+ })
+ }
+
+ fn list_snapshots(&self) -> Result<Vec<PathBuf>> {
+ let mut entries: Vec<_> = fs::read_dir(&self.state_dir)?
+ .filter_map(|e| e.ok())
+ .map(|e| e.path())
+ .filter(|p| p.is_file())
+ .filter(|p| {
+ p.file_name()
+ .and_then(|n| n.to_str())
+ .map(|s| s.ends_with(".squashfs") || s.ends_with(".squashfs.luks"))
+ .unwrap_or(false)
+ })
+ .collect();
+ entries.sort();
+ Ok(entries)
+ }
+
+ fn snapshot_path(&self, idx: usize, encrypted: bool) -> PathBuf {
+ let name = format!("{:04}{}", idx, if encrypted { ".squashfs.luks" } else { ".squashfs" });
+ self.state_dir.join(name)
+ }
+
+ fn temp_path(&self, stem: &str) -> PathBuf {
+ let ts = chrono::Local::now().format("%Y%m%d-%H%M%S");
+ self.work_dir.join(format!("{}_{}", stem, ts))
+ }
+}
+
+fn main() -> Result<()> {
+ let cli = Cli::parse();
+ let cfg = load_config(cli.config.as_deref())?;
+ let cfg = apply_env_overrides(cfg)?;
+ let cfg = apply_cli_overrides(cfg, &cli)?;
+ let mut ctx = Ctx::new(cfg)?;
+
+ match cli.cmd {
+ Cmd::Backup => cmd_backup(&mut ctx),
+ Cmd::Minimize { n } => cmd_minimize(&mut ctx, n),
+ Cmd::New => cmd_new(&mut ctx),
+ Cmd::Mount { s, target } => cmd_mount(&mut ctx, s, &target),
+ Cmd::Umount { target } => cmd_umount(&mut ctx, &target),
+ Cmd::Delete { s } => cmd_delete(&mut ctx, s),
+ }
+}
+
+fn abspath(p: &Path) -> PathBuf {
+ fs::canonicalize(p).unwrap_or_else(|_| p.to_path_buf())
+}
+
+fn abs_key(p: &Path) -> Result<String> {
+ let abs = fs::canonicalize(p)?;
+ let s = abs.to_string_lossy();
+ Ok(s.trim_start_matches('/').to_string())
+}
+
+fn parse_snap_index(p:&Path)->Result<usize>{
+ // Match on file_name rather than file_stem so "0001.squashfs.luks" resolves to index 1 as well.
+ let fname = p.file_name().and_then(|s| s.to_str()).ok_or_else(||anyhow!("Invalid filename: {}", p.display()))?;
+ let re = Regex::new(r"^(\d{4})\.").unwrap();
+ let caps = re.captures(fname).ok_or_else(|| anyhow!("No index found: {}", p.display()))?;
+ Ok(caps.get(1).unwrap().as_str().parse::<usize>().unwrap())
+}
+
+fn shell_split(s:&str)->Vec<String>{
+ s.split_whitespace().map(|x| x.to_string()).collect()
+}
+
+fn run(cmd:&mut Command, desc:&str)->Result<()>{
+ let status = cmd.status().with_context(|| format!("Unable to start {}", desc))?;
+ if !status.success(){ bail!("Command failed: {}", desc); }
+ Ok(())
+}
+
+fn run_interactive(cmd:&mut Command, desc:&str)->Result<()>{
+ cmd.stdin(Stdio::inherit()).stdout(Stdio::inherit()).stderr(Stdio::inherit());
+ run(cmd, desc)
+}
+
+fn require_root(op: &str) -> Result<()> {
+ if unsafe { libc::geteuid() } != 0 {
+ bail!("{op} requires root privileges.");
+ }
+ Ok(())
+}
+
+fn ensure_includes_nonempty(ctx:&Ctx)->Result<()>{
+ if ctx.include.is_empty(){ bail!("SQUASHR_INCLUDE is empty."); }
+ Ok(())
+}
+
+fn is_excluded(ctx:&Ctx, p:&Path) -> bool {
+ let Ok(cp) = fs::canonicalize(p) else { return false; };
+ for ex in &ctx.exclude {
+ if let Ok(cex) = fs::canonicalize(ex) {
+ if cp.starts_with(&cex) { return true; }
+ }
+ }
+ false
+}
+
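+/// Best-effort log cleanup before a backup: under every SQUASHR_TRUNCATE
+/// path, already-compressed logs (.gz/.xz/.zst/.bz2) are removed and the
+/// remaining files are truncated to zero length.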
+fn truncate_logs(ctx:&Ctx)->Result<()>{
+ if ctx.truncate.is_empty(){ return Ok(()); }
+ for base in &ctx.truncate {
+ if !base.exists(){ continue; }
+ for entry in WalkDir::new(base).follow_links(false).into_iter().filter_map(|e| e.ok()) {
+ let p = entry.path();
+ if p.is_file() {
+ let name = p.file_name().and_then(|s| s.to_str()).unwrap_or("");
+ if name.ends_with(".gz") || name.ends_with(".xz") || name.ends_with(".zst") || name.ends_with(".bz2") {
+ let _ = fs::remove_file(p);
+ continue;
+ }
+ let _ = fs::OpenOptions::new().write(true).open(p).and_then(|f| { f.set_len(0)?; Ok(()) });
+ }
+ }
+ }
+ Ok(())
+}
+
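+/// Opens the SQLite index (creating the schema on first use). The index
+/// tracks snapshots, include roots, files and per-snapshot change events
+/// ('c' created, 'm' modified, 'd' deleted).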
+fn db_open_init(ctx:&Ctx) -> Result<Connection> {
+ let must_init = !ctx.index_db.exists();
+ let conn = Connection::open(&ctx.index_db)
+ .with_context(|| format!("Opening index.sqlite: {}", ctx.index_db.display()))?;
+ conn.pragma_update(None, "journal_mode", "WAL")?;
+ conn.pragma_update(None, "synchronous", "NORMAL")?;
+ conn.execute_batch("PRAGMA foreign_keys=ON;")?;
+
+ if must_init {
+ conn.execute_batch(r#"
+ CREATE TABLE IF NOT EXISTS snapshots(
+ id INTEGER PRIMARY KEY,
+ created_at TEXT NOT NULL
+ );
+ CREATE TABLE IF NOT EXISTS roots(
+ id INTEGER PRIMARY KEY,
+ top_name TEXT UNIQUE NOT NULL,
+ abs_path TEXT NOT NULL
+ );
+ CREATE TABLE IF NOT EXISTS files(
+ id INTEGER PRIMARY KEY,
+ root_id INTEGER NOT NULL,
+ rel_path TEXT NOT NULL,
+ kind TEXT NOT NULL,
+ UNIQUE(root_id, rel_path),
+ FOREIGN KEY(root_id) REFERENCES roots(id) ON DELETE CASCADE
+ );
+ CREATE TABLE IF NOT EXISTS events(
+ snapshot_id INTEGER NOT NULL,
+ file_id INTEGER NOT NULL,
+ change CHAR NOT NULL CHECK(change IN ('c','m','d')),
+ size INTEGER,
+ mtime INTEGER,
+ checksum TEXT,
+ PRIMARY KEY(snapshot_id, file_id),
+ FOREIGN KEY(snapshot_id) REFERENCES snapshots(id) ON DELETE CASCADE,
+ FOREIGN KEY(file_id) REFERENCES files(id) ON DELETE CASCADE
+ );
+ "#)?;
+ }
+ Ok(conn)
+}
+
+fn ensure_root(conn:&Connection, include_abs:&Path) -> Result<(i64,String,PathBuf)> {
+ let top = abs_key(include_abs)?;
+ conn.execute("INSERT OR IGNORE INTO roots(top_name,abs_path) VALUES(?,?)",
+ params![&top, include_abs.to_string_lossy()])?;
+ let id = conn.query_row("SELECT id FROM roots WHERE top_name=?", params![&top], |r| r.get(0))?;
+ Ok((id, top, include_abs.to_path_buf()))
+}
+
+fn ensure_file_row(conn:&Connection, root_id:i64, rel:&str, kind:char) -> Result<i64> {
+ conn.execute("INSERT OR IGNORE INTO files(root_id,rel_path,kind) VALUES(?,?,?)",
+ params![root_id, rel, kind.to_string()])?;
+ Ok(conn.query_row("SELECT id FROM files WHERE root_id=? AND rel_path=?",
+ params![root_id, rel], |r| r.get(0))?)
+}
+
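+/// Returns the most recent recorded event per file, keyed by
+/// (top_name, rel_path), as (change, size, mtime).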
+fn prev_last_events(conn:&Connection) -> Result<HashMap<(String,String),(String,i64,i64)>> {
+ let mut map = HashMap::new();
+ let sql = r#"
+ SELECT r.top_name, f.rel_path, e.change, e.size, e.mtime
+ FROM files f
+ JOIN roots r ON r.id=f.root_id
+ JOIN events e ON e.file_id=f.id
+ WHERE e.snapshot_id = (SELECT MAX(e2.snapshot_id) FROM events e2 WHERE e2.file_id=f.id)
+ "#;
+ let mut stmt = conn.prepare(sql)?;
+ let rows = stmt.query_map([], |row| {
+ Ok((
+ row.get::<_,String>(0)?,
+ row.get::<_,String>(1)?,
+ row.get::<_,String>(2)?,
+ row.get::<_,i64>(3)?,
+ row.get::<_,i64>(4)?,
+ ))
+ })?;
+ for r in rows {
+ let (top, rel, ch, sz, mt) = r?;
+ map.insert((top, rel), (ch, sz, mt));
+ }
+ Ok(map)
+}
+
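+/// Lists the files whose most recent event up to snapshot `upto` is a
+/// deletion, i.e. the paths that must be hidden in a mounted view.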
+fn deletions_up_to(conn:&Connection, upto:i64) -> Result<Vec<(String,String)>> {
+ let mut out = vec![];
+ let sql = r#"
+ SELECT r.top_name, f.rel_path
+ FROM files f
+ JOIN roots r ON r.id=f.root_id
+ JOIN events e ON e.file_id=f.id
+ WHERE e.snapshot_id = (
+ SELECT MAX(e2.snapshot_id) FROM events e2 WHERE e2.file_id=f.id AND e2.snapshot_id <= ?
+ )
+ AND e.change='d'
+ "#;
+ let mut stmt = conn.prepare(sql)?;
+ let rows = stmt.query_map(params![upto], |row| Ok((row.get::<_,String>(0)?, row.get::<_,String>(1)?)))?;
+ for r in rows { out.push(r?); }
+ Ok(out)
+}
+
+#[derive(Debug, Clone)]
+struct FsMeta { size:u64, mtime:i64 }
+
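+/// Walks all include roots (skipping excluded paths, directories and special
+/// files) and returns the current size/mtime per (top_name, rel_path) plus
+/// the root id and absolute path per top-level name.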
+fn scan_current(ctx:&Ctx, conn:&Connection) -> Result<(HashMap<(String,String),FsMeta>, HashMap<String,(i64,PathBuf)>)> {
+ let mut now_map = HashMap::new();
+ let mut roots = HashMap::new();
+ for inc in &ctx.include {
+ let inc_abs = fs::canonicalize(inc)?;
+ let (root_id, top_name, abs_root) = ensure_root(conn, &inc_abs)?;
+ roots.insert(top_name.clone(), (root_id, abs_root.clone()));
+ for e in WalkDir::new(&abs_root).follow_links(false).into_iter().filter_map(|e| e.ok()) {
+ let p = e.path();
+ if is_excluded(ctx, p) { continue; }
+ let md = fs::symlink_metadata(p)?;
+ if md.is_dir() { continue; }
+ if !(md.file_type().is_symlink() || md.is_file()) { continue; }
+ let rel = p.strip_prefix(&abs_root).unwrap_or(Path::new("")).to_string_lossy().to_string();
+ now_map.insert((top_name.clone(), rel), FsMeta{ size: md.len(), mtime: md.mtime() });
+ }
+ }
+ Ok((now_map, roots))
+}
+
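+/// Builds the delta image with `mksquashfs -pf`: the pseudo-file list gets
+/// 'd' entries for parent directories, 's' entries for symlinks (with their
+/// targets) and 'l' entries referencing the changed source files, so nothing
+/// has to be copied into a staging directory first.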
+fn build_squash_image_pf(
+ ctx: &Ctx,
+ out: &Path,
+ changed: &[(String, String)],
+ roots: &HashMap<String, (i64, PathBuf)>,
+) -> Result<()> {
+ let mut dirset: BTreeSet<String> = BTreeSet::new();
+ for (top_ns, rel) in changed {
+ let img_path = if rel.is_empty() {
+ top_ns.clone()
+ } else {
+ format!("{}/{}", top_ns, rel)
+ };
+ let mut cur = Path::new(&img_path);
+ while let Some(p) = cur.parent() {
+ let s = p.to_string_lossy();
+ if !s.is_empty() {
+ dirset.insert(s.to_string());
+ }
+ if p.as_os_str().is_empty() { break; }
+ cur = p;
+ }
+ }
+
+ let pf = ctx.temp_path("filelist.pf");
+ {
+ let mut f = fs::File::create(&pf)?;
+ writeln!(f, "/ d 0755 0 0")?;
+
+ let mut dirs: Vec<String> = dirset.into_iter().collect();
+ dirs.sort_by_key(|d| Path::new(d).components().count());
+ for d in &dirs {
+ writeln!(f, "{} d 0755 0 0", d)?;
+ }
+ }
+
+ {
+ let mut f = fs::OpenOptions::new().append(true).open(&pf)?;
+ for (top_ns, rel) in changed {
+ let (_id, abs_root) = roots.get(top_ns)
+ .ok_or_else(|| anyhow!("unknown root {}", top_ns))?;
+ let src = if rel.is_empty() { abs_root.clone() } else { abs_root.join(rel) };
+ let md = fs::symlink_metadata(&src)?;
+
+ let img_path = if rel.is_empty() { top_ns.clone() } else { format!("{}/{}", top_ns, rel) };
+
+ if md.file_type().is_symlink() {
+ // Symlink: read the link target and write it as an 's' pseudo-file entry
+ let target = fs::read_link(&src)?.to_string_lossy().to_string();
+ writeln!(f, "{} s 0777 0 0 {}", img_path, escape_pf(&target))?;
+ } else if md.is_file() {
+ writeln!(f, "{} l {}", img_path, escape_pf(&src.to_string_lossy()))?;
+ }
+ }
+ }
+
+ let empty_src = ctx.temp_path("empty.src");
+ fs::create_dir_all(&empty_src)?;
+
+ let mut cmd = Command::new("mksquashfs");
+ cmd.arg(&empty_src)
+ .arg(out)
+ .arg("-no-progress")
+ .arg("-no-recovery")
+ .arg("-always-use-fragments")
+ .arg("-pf").arg(&pf);
+
+ if ctx.comp_enable {
+ cmd.arg("-comp").arg(&ctx.comp_algo);
+ if let Some(extra) = ctx.comp_args.as_ref() {
+ for tok in shell_split(extra) { cmd.arg(tok); }
+ }
+ } else {
+ cmd.arg("-noI").arg("-noD").arg("-noF");
+ }
+
+ run(&mut cmd, "mksquashfs (-pf)")?;
+ Ok(())
+}
+
+fn escape_pf<S: AsRef<str>>(s: S) -> String {
+ let s = s.as_ref();
+ if s.chars().any(|c| c.is_whitespace() || c == '"') {
+ let escaped = s.replace('\\', "\\\\").replace('"', "\\\"");
+ format!("\"{}\"", escaped)
+ } else {
+ s.to_string()
+ }
+}
+
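+/// Wraps a plain SquashFS image in a LUKS container: allocates the container
+/// file with ~16 MiB of headroom for the LUKS header, formats and opens it
+/// interactively with cryptsetup, copies the image in with dd and closes the
+/// mapping again.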
+fn encrypt_into_luks(ctx:&Ctx, plain:&Path, out_luks:&Path)->Result<()>{
+ require_root("Creatintg LUKS container")?;
+ let plain_size = fs::metadata(plain)?.len();
+ let size = plain_size + 16 * 1024 * 1024;
+ {
+ let f = fs::File::create(out_luks)?;
+ f.set_len(size)?;
+ }
+ let mut c = Command::new("cryptsetup");
+ if let Some(args)=ctx.luks_create_args.as_ref(){ for t in shell_split(args){ c.arg(t);} }
+ c.arg("luksFormat").arg(out_luks);
+ run_interactive(&mut c, "cryptsetup luksFormat")?;
+
+ let mapper = format!("squashr_{}", parse_snap_index(out_luks).unwrap_or(0));
+ let mut o = Command::new("cryptsetup");
+ if let Some(args)=ctx.luks_open_args.as_ref(){ for t in shell_split(args){ o.arg(t);} }
+ o.arg("open").arg(out_luks).arg(&mapper);
+ run_interactive(&mut o, "cryptsetup open")?;
+
+ let dev = format!("/dev/mapper/{}", mapper);
+ let mut dd = Command::new("dd");
+ dd.arg(format!("if={}", plain.display()))
+ .arg(format!("of={}", dev))
+ .arg("bs=4M").arg("status=none").arg("conv=fsync");
+ run(&mut dd, "dd")?;
+
+ let mut close = Command::new("cryptsetup");
+ close.arg("close").arg(&mapper);
+ run(&mut close, "cryptsetup close")?;
+
+ Ok(())
+}
+
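+/// Incremental backup: diffs the current file tree against the last recorded
+/// state, writes only created/modified files into a new SquashFS layer
+/// (optionally LUKS-wrapped), records create/modify/delete events in the
+/// index and rotates old snapshots if the configured maximum is exceeded.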
+fn cmd_backup(ctx:&mut Ctx)->Result<()>{
+ ensure_includes_nonempty(ctx)?;
+ truncate_logs(ctx)?;
+
+ let mut conn = db_open_init(ctx)?;
+ let tx = conn.transaction()?;
+ let last_id: i64 = tx.query_row("SELECT IFNULL(MAX(id),0) FROM snapshots", [], |r| r.get(0))?;
+ let snap_id = last_id + 1;
+ tx.execute("INSERT INTO snapshots(id,created_at) VALUES(?,datetime('now'))", params![snap_id])?;
+
+ let (now_map, roots) = scan_current(ctx, &tx)?;
+ let prev_map = prev_last_events(&tx)?;
+
+ let mut changed: Vec<(String,String)> = vec![];
+ for ((top, rel), meta) in &now_map {
+ match prev_map.get(&(top.clone(), rel.clone())) {
+ None => changed.push((top.clone(), rel.clone())),
+ Some((ch, psz, pmt)) => {
+ if ch == "d" || *psz as u64 != meta.size || *pmt as i64 != meta.mtime {
+ changed.push((top.clone(), rel.clone()));
+ }
+ }
+ }
+ }
+
+ let prev_present: HashSet<(String,String)> = prev_map.iter()
+ .filter(|(_k,(ch,_,_))| ch != "d")
+ .map(|(k,_v)| k.clone()).collect();
+ let now_keys: HashSet<(String,String)> = now_map.keys().cloned().collect();
+ let deleted: Vec<(String,String)> = prev_present.difference(&now_keys).cloned().collect();
+
+ // Build the SquashFS delta image deterministically via -pf
+ let idx = snap_id as usize;
+ let plain_img = ctx.temp_path("snapshot.plain.squashfs");
+
+ if changed.is_empty() {
+ // Empty delta → minimal (empty) image
+ let empty_src = ctx.temp_path("empty.src");
+ fs::create_dir_all(&empty_src)?;
+ let mut cmd = Command::new("mksquashfs");
+ cmd.arg(&empty_src).arg(&plain_img).arg("-no-progress").arg("-no-recovery");
+ if ctx.comp_enable {
+ cmd.arg("-comp").arg(&ctx.comp_algo);
+ if let Some(extra)=ctx.comp_args.as_ref() { for tok in shell_split(extra){ cmd.arg(tok); } }
+ }
+ run(&mut cmd, "mksquashfs (empty)")?;
+ } else {
+ build_squash_image_pf(ctx, &plain_img, &changed, &roots)?;
+ }
+
+ let final_path = if ctx.luks_enable {
+ let out = ctx.snapshot_path(idx, true);
+ encrypt_into_luks(ctx, &plain_img, &out)?;
+ fs::remove_file(&plain_img).ok();
+ out
+ } else {
+ let out = ctx.snapshot_path(idx, false);
+ fs::rename(&plain_img, &out)?;
+ out
+ };
+
+ for (top, rel) in &changed {
+ let (root_id, abs_root) = roots.get(top).unwrap();
+ let real = if rel.is_empty(){ abs_root.clone() } else { abs_root.join(rel) };
+ let md = fs::symlink_metadata(&real)?;
+ let kind = if md.file_type().is_symlink() { 'l' } else { 'f' };
+ let fid = ensure_file_row(&tx, *root_id, rel, kind)?;
+ let ev = match prev_map.get(&(top.clone(), rel.clone())) {
+ None => "c",
+ Some((ch,_,_)) if ch == "d" => "c",
+ _ =>"m"
+ };
+ tx.execute("INSERT OR REPLACE INTO events(snapshot_id,file_id,change,size,mtime,checksum) VALUES(?,?,?,?,?,NULL)",
+ params![snap_id, fid, ev, md.len() as i64, md.mtime()])?;
+ }
+ for (top, rel) in &deleted {
+ let (root_id, _abs) = roots.get(top).unwrap();
+ let fid = ensure_file_row(&tx, *root_id, rel, 'f')?;
+ tx.execute("INSERT OR REPLACE INTO events(snapshot_id,file_id,change,size,mtime,checksum) VALUES(?,?,?,?,?,NULL)",
+ params![snap_id, fid, "d", 0i64, 0i64])?;
+ }
+
+ tx.commit()?;
+ println!("Created new snapshot: {:04}", final_path.display());
+
+ rotate_if_needed(ctx)?;
+ Ok(())
+}
+
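+/// Merges the two oldest snapshots until the snapshot count is within
+/// SQUASHR_N_SNAPSHOTS_MAX (0 means unlimited).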
+fn rotate_if_needed(ctx:&mut Ctx)->Result<()>{
+ if let Some(max)=ctx.max_keep {
+ loop {
+ let snaps = ctx.list_snapshots()?;
+ if snaps.len() <= max { break; }
+ merge_first_two(ctx)?;
+ }
+ }
+ Ok(())
+}
+
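+/// Reconstructs the set of files that exist as of snapshot `upto`, i.e. all
+/// files whose latest event up to that snapshot is not a deletion.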
+fn present_state_up_to(conn:&Connection, upto:i64) -> Result<Vec<(String,String,i64,i64,String)>> {
+ let present_sql = r#"
+ SELECT r.top_name, f.rel_path, e.size, e.mtime, f.kind
+ FROM files f
+ JOIN roots r ON r.id=f.root_id
+ JOIN events e ON e.file_id=f.id
+ WHERE e.snapshot_id = (
+ SELECT MAX(e2.snapshot_id) FROM events e2 WHERE e2.file_id=f.id AND e2.snapshot_id <= ?
+ ) AND e.change!='d'
+ "#;
+ let mut stmt = conn.prepare(present_sql)?;
+ let rows = stmt.query_map(params![upto], |r| Ok((r.get(0)?, r.get(1)?, r.get(2)?, r.get(3)?, r.get(4)?)))?;
+ let mut v = vec![];
+ for r in rows { v.push(r?); }
+ Ok(v)
+}
+
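+/// Folds the two oldest layers into one: mounts both images read-only,
+/// stacks them with OverlayFS, re-squashes the merged view, renumbers the
+/// remaining snapshot files and rewrites the index so the merged layer
+/// becomes snapshot 1.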
+fn merge_first_two(ctx:&mut Ctx)->Result<()>{
+ require_root("merge (mounts / overlay)")?;
+ let snaps = ctx.list_snapshots()?;
+ if snaps.len() < 2 { return Ok(()); }
+ let s1 = &snaps[0];
+ let s2 = &snaps[1];
+ let idx1 = parse_snap_index(s1)?;
+ let idx2 = parse_snap_index(s2)?;
+ if idx2 != idx1+1 { bail!("Unexpected indices: {:04} + {:04}", idx1, idx2); }
+
+ let m1 = ctx.mounts_dir.join(format!("merge_{:04}", idx1));
+ let m2 = ctx.mounts_dir.join(format!("merge_{:04}", idx2));
+ fs::create_dir_all(&m1)?;
+ fs::create_dir_all(&m2)?;
+ mount_image_ro(ctx, s1, &m1)?;
+ mount_image_ro(ctx, s2, &m2)?;
+
+ let upper = ctx.temp_path("merge.upper");
+ let work = ctx.temp_path("merge.work");
+ let view = ctx.temp_path("merge.view");
+ fs::create_dir_all(&upper)?;
+ fs::create_dir_all(&work)?;
+ fs::create_dir_all(&view)?;
+ let loweropt = format!("{}:{}", abspath(&m2).display(), abspath(&m1).display()); // s2 over s1: the leftmost lowerdir is the topmost layer
+ mount_overlay(&loweropt, &upper, &work, &view)?;
+
+ let tmp_plain = ctx.temp_path("merge.plain.sqsh");
+ let mut cmd = Command::new("mksquashfs");
+ cmd.arg(&view).arg(&tmp_plain).arg("-no-progress").arg("-no-recovery");
+ if ctx.comp_enable {
+ cmd.arg("-comp").arg(&ctx.comp_algo);
+ if let Some(extra)=ctx.comp_args.as_ref(){ for tok in shell_split(extra){ cmd.arg(tok);} }
+ }
+ run(&mut cmd, "mksquashfs (merge)")?;
+
+ let _ = umount(&view);
+ let _ = umount(&m2);
+ let _ = umount(&m1);
+
+ let is_luks = s1.extension().and_then(|e| e.to_str()).unwrap_or("") == "luks";
+ let out_path = ctx.snapshot_path(idx1, is_luks);
+ if s1.exists(){ fs::remove_file(s1)?; }
+ if is_luks {
+ encrypt_into_luks(ctx, &tmp_plain, &out_path)?;
+ fs::remove_file(&tmp_plain).ok();
+ } else {
+ fs::rename(&tmp_plain, &out_path)?;
+ }
+ if s2.exists(){ fs::remove_file(s2)?; }
+ let rest = ctx.list_snapshots()?;
+ for p in rest {
+ let n = parse_snap_index(&p)?;
+ if n > idx2 {
+ let is_luks = p.extension().and_then(|e| e.to_str()).unwrap_or("") == "luks";
+ let newp = ctx.snapshot_path(n-1, is_luks);
+ fs::rename(&p, &newp)?;
+ }
+ }
+
+ let mut conn = db_open_init(ctx)?;
+ let tx = conn.transaction()?;
+
+ let present = present_state_up_to(&tx, 2)?;
+ tx.execute("DELETE FROM events WHERE snapshot_id IN (1,2)", [])?;
+ tx.execute("DELETE FROM snapshots WHERE id IN (1,2)", [])?;
+ tx.execute("INSERT INTO snapshots(id,created_at) VALUES(1,datetime('now'))", [])?;
+ for (top, rel, sz, mt, kind) in present {
+ let root_id: i64 = tx.query_row("SELECT id FROM roots WHERE top_name=?", params![top], |r| r.get(0))?;
+ let fid = ensure_file_row(&tx, root_id, &rel, kind.chars().next().unwrap_or('f'))?;
+ tx.execute("INSERT OR REPLACE INTO events(snapshot_id,file_id,change,size,mtime,checksum) VALUES(?,?,?,?,?,NULL)",
+ params![1i64, fid, "c", sz, mt])?;
+ }
+ tx.execute("UPDATE events SET snapshot_id = snapshot_id - 1 WHERE snapshot_id > 2", [])?;
+ tx.execute("UPDATE snapshots SET id = id - 1 WHERE id > 2", [])?;
+ tx.commit()?;
+
+ println!("Merged snapshots {:04} + {:04} → {:04}.", idx1, idx2, idx1);
+ Ok(())
+}
+
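+/// Writes whiteout markers (`.wh.<name>` character devices 0:0) into an
+/// upper directory for the given deletions. Not referenced elsewhere in this
+/// script; deletions are applied via `apply_whiteouts_via_unlink` instead.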
+fn create_whiteouts(upper:&Path, deletes:&[(String,String)]) -> Result<()> {
+ for (top_ns, rel) in deletes {
+ let p = Path::new(rel);
+ let parent = upper.join(top_ns).join(p.parent().unwrap_or(Path::new("")));
+ fs::create_dir_all(&parent)?;
+ let base = p.file_name().ok_or_else(|| anyhow!("Empty filename: {}", rel))?
+ .to_string_lossy().to_string();
+ let wh = parent.join(format!(".wh.{}", base));
+ if wh.exists() { continue; }
+ let mut cmd = Command::new("mknod");
+ cmd.arg(&wh).arg("c").arg("0").arg("0");
+ run(&mut cmd, "mknod whiteout")?;
+ }
+ Ok(())
+}
+
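+/// Mounts a snapshot image read-only: `.luks` images are opened via
+/// cryptsetup first and mounted from the mapper device, plain images are
+/// loop-mounted directly.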
+fn mount_image_ro(ctx:&Ctx, img:&Path, mnt:&Path)->Result<()>{
+ require_root("Mount SquashFS")?;
+ let mnt_abs = abspath(mnt);
+ if img.extension().and_then(|e| e.to_str()) == Some("luks") {
+ let mapper = format!(
+ "squashr_mount_{}",
+ img.file_stem().and_then(|s| s.to_str()).unwrap_or("img")
+ );
+ let dev = format!("/dev/mapper/{}", mapper);
+
+ let _ = Command::new("cryptsetup").arg("close").arg(&mapper).status();
+
+ let mut o = Command::new("cryptsetup");
+ if let Some(args)=ctx.luks_open_args.as_ref(){ for t in shell_split(args){ o.arg(t);} }
+ o.arg("open").arg(img).arg(&mapper);
+ run_interactive(&mut o, "cryptsetup open (mount)")?;
+
+ let mut cmd = Command::new("mount");
+ cmd.arg("-t").arg("squashfs").arg("-o").arg("ro").arg(&dev).arg(&mnt_abs);
+ run(&mut cmd, "mount squashfs (luks)")?;
+ } else {
+ let mut cmd = Command::new("mount");
+ cmd.arg("-t").arg("squashfs").arg("-o").arg("loop,ro").arg(img).arg(&mnt_abs);
+ run(&mut cmd, "mount squashfs")?;
+ }
+ Ok(())
+}
+
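+/// Mounts an OverlayFS at `target` with the given colon-separated lowerdir
+/// stack (leftmost layer on top) and the supplied upper/work directories.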
+fn mount_overlay(lowerdirs:&str, upper:&Path, work:&Path, target:&Path)->Result<()>{
+ require_root("Mount OverlayFS")?;
+ let opt = format!(
+ "lowerdir={},upperdir={},workdir={}",
+ lowerdirs,
+ abspath(upper).display(),
+ abspath(work).display()
+ );
+ let mut cmd = Command::new("mount");
+ cmd.arg("-t").arg("overlay").arg("overlay")
+ .arg("-o").arg(opt)
+ .arg(abspath(target));
+ run(&mut cmd, "mount overlay")?;
+ Ok(())
+}
+
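+/// Hides deleted files in the mounted overlay view by unlinking them there,
+/// which makes the overlay driver create the whiteouts itself; files that
+/// are already absent are ignored.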
+fn apply_whiteouts_via_unlink(view_root:&Path, deletes:&[(String,String)]) -> Result<()> {
+ for (top_ns, rel) in deletes {
+ let p = view_root.join(top_ns).join(rel);
+ match fs::remove_file(&p) {
+ Ok(_) => {},
+ Err(e) if e.kind() == ErrorKind::NotFound => {
+ // okay
+ },
+ Err(e) => return Err(anyhow!("Unlink failed for {}: {}", p.display(), e)),
+ }
+ }
+ Ok(())
+}
+
+fn umount(path:&Path)->Result<()>{
+ require_root("umount")?;
+ let mut cmd = Command::new("umount");
+ cmd.arg(path);
+ run(&mut cmd, "umount")?;
+ Ok(())
+}
+
+// --- NEW: robust umount that ignores "not mounted" (exit code 32)
+fn umount_ignore_not_mounted(path:&Path)->Result<()>{
+ require_root("umount")?;
+ let status = Command::new("umount")
+ .arg(path)
+ .status()
+ .with_context(|| format!("umount {}", path.display()))?;
+ if !status.success() {
+ if let Some(32) = status.code() {
+ // EX_NOTMNT: the target was not a mountpoint; that's fine.
+ return Ok(());
+ }
+ bail!("umount failed for {}", path.display());
+ }
+ Ok(())
+}
+
+fn crypt_close_ignore(name:&str){
+ let _ = Command::new("cryptsetup").arg("close").arg(name).status();
+}
+
+fn find_snapshot_file(ctx:&Ctx, idx:usize)->Result<PathBuf>{
+ let p1 = ctx.snapshot_path(idx, false);
+ let p2 = ctx.snapshot_path(idx, true);
+ if p1.exists(){ Ok(p1) }
+ else if p2.exists(){ Ok(p2) }
+ else { bail!("Snapshot not found: {}.", idx); }
+}
+
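+/// Mounts the backup state up to snapshot `s` (default: the latest): a single
+/// snapshot is mounted directly, multiple layers are stacked with OverlayFS,
+/// and recorded deletions are applied afterwards via unlink.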
+fn cmd_mount(ctx:&mut Ctx, s:Option<usize>, target:&Path)->Result<()>{
+ require_root("mount")?;
+ fs::create_dir_all(target)?;
+ let snaps = ctx.list_snapshots()?;
+ if snaps.is_empty(){ bail!("No snapshots found."); }
+
+ let upto = if let Some(n)=s {
+ let max = snaps.len();
+ if n==0 || n>max { bail!("Snapshot -s {} is invalid (1..={}).", n, max); }
+ n
+ } else { snaps.len() };
+
+ if upto == 1 {
+ let img = find_snapshot_file(ctx, 1)?;
+ mount_image_ro(ctx, &img, &abspath(target))?;
+ println!("Mounted to {}.", abspath(target).display());
+ return Ok(());
+ }
+
+ // MULTI-LAYER
+ let mut lowers: Vec<PathBuf> = vec![];
+ for i in 1..=upto {
+ let img = find_snapshot_file(ctx, i)?;
+ let mnt = ctx.mounts_dir.join(format!("snap_{:04}", i));
+ fs::create_dir_all(&mnt)?;
+ mount_image_ro(ctx, &img, &mnt)?;
+ lowers.push(abspath(&mnt));
+ }
+
+ let upper = abspath(&ctx.temp_path("overlay.upper"));
+ let work = abspath(&ctx.temp_path("overlay.work"));
+ fs::create_dir_all(&upper)?;
+ fs::create_dir_all(&work)?;
+
+ let loweropt = lowers.iter().rev().map(|p| p.display().to_string()).join(":"); // newest first: the leftmost lowerdir is the topmost layer
+ mount_overlay(&loweropt, &upper, &work, &abspath(target))?;
+
+ // NEW: create the whiteouts AFTER mounting, via unlink in the overlay view
+ let mut conn = db_open_init(ctx)?;
+ let deletes = deletions_up_to(&conn, upto as i64)?;
+ if !deletes.is_empty() {
+ apply_whiteouts_via_unlink(&abspath(target), &deletes)?;
+ }
+
+ println!("Mounted to {} up to snapshot {:04}.", target.display(), upto);
+ println!("Unmount with: umount {} ; und die snap-Mounts unter {}", target.display(), ctx.mounts_dir.display());
+ Ok(())
+}
+
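+/// Tears down a previous mount: unmounts the overlay target and all
+/// per-snapshot mounts, closes any LUKS mappers and removes leftover mount
+/// and temporary overlay upper/work directories.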
+fn cmd_umount(ctx:&mut Ctx, target:&Path)->Result<()>{
+ require_root("umount")?;
+
+ // 1) Unmount the overlay target (if it is mounted)
+ match umount_ignore_not_mounted(&abspath(target)) {
+ Ok(_) => println!("Umounted {} (overlay/target, falls vorhanden).", abspath(target).display()),
+ Err(e) => println!("Hinweis: {} (fahre fort)", e),
+ }
+
+ // 2) Unmount all per-snapshot mounts under mounts_dir
+ let mut entries: Vec<_> = fs::read_dir(&ctx.mounts_dir)?
+ .filter_map(|e| e.ok())
+ .map(|e| e.path())
+ .filter(|p| p.is_dir())
+ .collect();
+ // in reverse order so that higher indices are unmounted first
+ entries.sort();
+ entries.reverse();
+
+ for mnt in &entries {
+ let _ = umount_ignore_not_mounted(mnt);
+ }
+
+ // 3) Close LUKS mappers, if any
+ for mnt in &entries {
+ if let Some(name) = mnt.file_name().and_then(|s| s.to_str()) {
+ // Directory names look like "snap_0001" / "merge_0001"
+ if let Some(idx_str) = name.split('_').nth(1) {
+ // Try both possible mapper names (depending on the file_stem):
+ // "squashr_mount_0001" and "squashr_mount_0001.squashfs"
+ let mapper_a = format!("squashr_mount_{}", idx_str);
+ let mapper_b = format!("squashr_mount_{}.squashfs", idx_str);
+ crypt_close_ignore(&mapper_a);
+ crypt_close_ignore(&mapper_b);
+ }
+ }
+ }
+
+ // 4) Remove empty mount directories (optional, best effort)
+ for mnt in entries {
+ let _ = fs::remove_dir(mnt);
+ }
+
+ // 5) Clean up orphaned temporary upper/work directories from the last mount session
+ if let Ok(ws) = fs::read_dir(&ctx.work_dir) {
+ for e in ws.filter_map(|e| e.ok()) {
+ if let Some(n) = e.file_name().to_str() {
+ if n.starts_with("overlay.upper_") || n.starts_with("overlay.work_") {
+ let _ = fs::remove_dir_all(e.path());
+ }
+ }
+ }
+ }
+ println!("Umount abgeschlossen.");
+ Ok(())
+}
+
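+/// Removes snapshot `s`, renumbers the following snapshot files and shifts
+/// the corresponding snapshot ids and events in the index.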
+fn cmd_delete(ctx:&mut Ctx, s:usize)->Result<()>{
+ let path_plain = ctx.snapshot_path(s, false);
+ let path_luks = ctx.snapshot_path(s, true);
+ if !path_plain.exists() && !path_luks.exists() { bail!("Snapshot {} does not exist.", s); }
+ if path_plain.exists(){ fs::remove_file(&path_plain)?; }
+ if path_luks.exists(){ fs::remove_file(&path_luks)?; }
+
+ let snaps = ctx.list_snapshots()?;
+ for p in snaps {
+ let n = parse_snap_index(&p)?;
+ if n > s {
+ let is_luks = p.extension().and_then(|e| e.to_str()).unwrap_or("") == "luks";
+ let newp = ctx.snapshot_path(n-1, is_luks);
+ fs::rename(&p, &newp)?;
+ }
+ }
+
+ // Adjust the database
+ let mut conn = db_open_init(ctx)?;
+ let tx = conn.transaction()?;
+ tx.execute("DELETE FROM events WHERE snapshot_id=?", params![s as i64])?;
+ tx.execute("DELETE FROM snapshots WHERE id=?", params![s as i64])?;
+ tx.execute("UPDATE events SET snapshot_id = snapshot_id - 1 WHERE snapshot_id > ?", params![s as i64])?;
+ tx.execute("UPDATE snapshots SET id = id - 1 WHERE id > ?", params![s as i64])?;
+ tx.commit()?;
+ println!("Deleted snapshot {}, decrementing children.", s);
+ Ok(())
+}
+
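+/// Deletes all snapshots, clears the index and immediately creates a fresh
+/// initial snapshot.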
+fn cmd_new(ctx:&mut Ctx)->Result<()>{
+ for s in ctx.list_snapshots()? { fs::remove_file(s)?; }
+ let mut conn = db_open_init(ctx)?;
+ conn.execute("DELETE FROM events", [])?;
+ conn.execute("DELETE FROM snapshots", [])?;
+ println!("Database cleared. Creating new initial snapshot.");
+ cmd_backup(ctx)
+}
+
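+/// Merges the oldest snapshots until at most `n` remain (default:
+/// SQUASHR_N_SNAPSHOTS_MIN); `n` may not go below that minimum.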
+fn cmd_minimize(ctx:&mut Ctx, n_opt:Option<usize>)->Result<()>{
+ let target = n_opt.unwrap_or(ctx.min_keep);
+ if target < ctx.min_keep {
+ bail!("n ({}) < SQUASHR_N_SNAPSHOTS_MIN ({}) not allowed.", target, ctx.min_keep);
+ }
+ loop {
+ let snaps = ctx.list_snapshots()?;
+ if snaps.len() <= target { break; }
+ merge_first_two(ctx)?;
+ }
+ println!("Database minimized. New snapshot count: {}", ctx.list_snapshots()?.len());
+ Ok(())
+}