/* SquashR - Manage backups based on SquashFS, OverlayFS and LUKS * Copyright (C) 2025 Leonard Kugis * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ use anyhow::{anyhow, bail, Context, Result}; use clap::{Parser, Subcommand, ValueHint}; use itertools::Itertools; use regex::Regex; use serde::Deserialize; use std::collections::{BTreeSet, HashMap, HashSet}; use std::env; use std::fs; use std::io::{Read, Write}; use std::io::ErrorKind; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; use walkdir::WalkDir; // kleine Helfer für Verbose-Logs macro_rules! vlog { ($ctx:expr, $($arg:tt)*) => { if $ctx.verbose { eprintln!($($arg)*); } }; } #[derive(Parser, Debug)] #[command(name="squashr", version, about="Manage backups based on SquashFS, OverlayFS and LUKS")] struct Cli { #[arg(short, long, value_hint=ValueHint::FilePath)] config: Option, #[arg(long, value_hint=ValueHint::DirPath)] squashr_root: Option, #[arg(long)] squashr_n_snapshots_min: Option, #[arg(long)] squashr_n_snapshots_max: Option, #[arg(long)] squashr_include: Option, #[arg(long)] squashr_exclude: Option, #[arg(long)] squashr_truncate: Option, #[arg(long)] squashr_compression_enable: Option, #[arg(long)] squashr_compression_algo: Option, #[arg(long, allow_hyphen_values = true)] squashr_compression_args: Option, #[arg(long)] squashr_cryptsetup_enable: Option, #[arg(long, allow_hyphen_values = true)] squashr_cryptsetup_create_args: Option, #[arg(long, allow_hyphen_values = true)] squashr_cryptsetup_open_args: Option, /// Passphrase für cryptsetup; ACHTUNG: Sichtbar im Prozesslisting. #[arg(long)] squashr_cryptsetup_pass: Option, /// Datei mit Passphrase (roh, binär möglich); hat Vorrang, wenn beides gesetzt ist. #[arg(long, value_hint=ValueHint::FilePath)] squashr_cryptsetup_pass_file: Option, /// Mehr Ausgaben, inkl. Auflistung gesicherter Dateien #[arg(short, long)] verbose: bool, #[command(subcommand)] cmd: Cmd, } #[derive(Subcommand, Debug)] enum Cmd { Backup, Minimize { #[arg(short = 'n')] n: Option }, New, Mount { #[arg(short = 's')] s: Option, target: PathBuf }, Delete { #[arg(short = 's')] s: usize }, /// Hängt SquashR-relevante Mounts aus (ohne Argument: alle; mit Pfad: nur diesen). Umount { target: Option }, } #[derive(Debug, Clone, Deserialize)] struct Config { SQUASHR_ROOT: Option, SQUASHR_N_SNAPSHOTS_MIN: Option, SQUASHR_N_SNAPSHOTS_MAX: Option, SQUASHR_INCLUDE: Option>, SQUASHR_EXCLUDE: Option>, SQUASHR_TRUNCATE: Option>, SQUASHR_COMPRESSION_ENABLE: Option, SQUASHR_COMPRESSION_ALGO: Option, SQUASHR_COMPRESSION_ARGS: Option, SQUASHR_CRYPTSETUP_ENABLE: Option, SQUASHR_CRYPTSETUP_CREATE_ARGS: Option, SQUASHR_CRYPTSETUP_OPEN_ARGS: Option, SQUASHR_CRYPTSETUP_PASS: Option, SQUASHR_CRYPTSETUP_PASS_FILE: Option, } impl Default for Config { fn default() -> Self { Self { SQUASHR_ROOT: None, SQUASHR_N_SNAPSHOTS_MIN: Some(5), SQUASHR_N_SNAPSHOTS_MAX: Some(30), SQUASHR_INCLUDE: None, SQUASHR_EXCLUDE: None, SQUASHR_TRUNCATE: None, SQUASHR_COMPRESSION_ENABLE: Some(true), SQUASHR_COMPRESSION_ALGO: Some("zstd".to_string()), SQUASHR_COMPRESSION_ARGS: Some("-Xcompression-level 19".to_string()), SQUASHR_CRYPTSETUP_ENABLE: Some(false), SQUASHR_CRYPTSETUP_CREATE_ARGS: Some("--type luks2".to_string()), SQUASHR_CRYPTSETUP_OPEN_ARGS: Some("--type luks".to_string()), SQUASHR_CRYPTSETUP_PASS: None, SQUASHR_CRYPTSETUP_PASS_FILE: None, } } } fn parse_list(s: &str) -> Vec { let mut out: Vec = vec![]; for line in s.lines() { let t = line.trim(); if !t.is_empty() { out.push(t.to_string()); } } out = out.into_iter() .flat_map(|x| x.split(',').map(|y| y.trim().to_string()).collect::>()) .collect(); out.into_iter().filter(|x| !x.is_empty()).collect() } fn load_config(path: Option<&Path>) -> Result { let mut cfg = Config::default(); if let Some(p) = path { if p.exists() { let text = fs::read_to_string(p).with_context(|| format!("Konfig lesen: {}", p.display()))?; let mut map: HashMap = HashMap::new(); for line in text.lines() { let l = line.trim(); if l.is_empty() || l.starts_with('#') { continue; } if let Some((k,v)) = l.split_once('=') { map.insert(k.trim().to_string(), v.trim().to_string()); } } let assign = |k:&str, f: &mut dyn FnMut(String)| { if let Some(v)=map.get(k).cloned(){ f(v) } }; let set = |k:&str, f:&mut dyn FnMut(String)| assign(k, f); set("SQUASHR_ROOT", &mut |v| cfg.SQUASHR_ROOT = Some(PathBuf::from(v))); set("SQUASHR_N_SNAPSHOTS_MIN", &mut |v| cfg.SQUASHR_N_SNAPSHOTS_MIN = v.parse().ok()); set("SQUASHR_N_SNAPSHOTS_MAX", &mut |v| cfg.SQUASHR_N_SNAPSHOTS_MAX = v.parse().ok()); set("SQUASHR_INCLUDE", &mut |v| cfg.SQUASHR_INCLUDE = Some(parse_list(&v))); set("SQUASHR_EXCLUDE", &mut |v| cfg.SQUASHR_EXCLUDE = Some(parse_list(&v))); set("SQUASHR_TRUNCATE", &mut |v| cfg.SQUASHR_TRUNCATE = Some(parse_list(&v))); set("SQUASHR_COMPRESSION_ENABLE", &mut |v| cfg.SQUASHR_COMPRESSION_ENABLE = v.parse().ok()); set("SQUASHR_COMPRESSION_ALGO", &mut |v| cfg.SQUASHR_COMPRESSION_ALGO = Some(v)); set("SQUASHR_COMPRESSION_ARGS", &mut |v| cfg.SQUASHR_COMPRESSION_ARGS = Some(v)); set("SQUASHR_CRYPTSETUP_ENABLE", &mut |v| cfg.SQUASHR_CRYPTSETUP_ENABLE = v.parse().ok()); set("SQUASHR_CRYPTSETUP_CREATE_ARGS", &mut |v| cfg.SQUASHR_CRYPTSETUP_CREATE_ARGS = Some(v)); set("SQUASHR_CRYPTSETUP_OPEN_ARGS", &mut |v| cfg.SQUASHR_CRYPTSETUP_OPEN_ARGS = Some(v)); set("SQUASHR_CRYPTSETUP_PASS", &mut |v| cfg.SQUASHR_CRYPTSETUP_PASS = Some(v)); set("SQUASHR_CRYPTSETUP_PASS_FILE", &mut |v| cfg.SQUASHR_CRYPTSETUP_PASS_FILE = Some(PathBuf::from(v))); } } Ok(cfg) } fn bool_env(name:&str)->Option{ env::var(name).ok()?.parse().ok() } fn str_env(name:&str)->Option{ env::var(name).ok() } fn list_env(name:&str)->Option>{ env::var(name).ok().map(|v| parse_list(&v)) } fn apply_env_overrides(mut cfg: Config)->Result{ if let Some(v)=str_env("SQUASHR_ROOT"){ cfg.SQUASHR_ROOT=Some(PathBuf::from(v)); } if let Some(v)=env::var("SQUASHR_N_SNAPSHOTS_MIN").ok().and_then(|x| x.parse().ok()){ cfg.SQUASHR_N_SNAPSHOTS_MIN=Some(v); } if let Some(v)=env::var("SQUASHR_N_SNAPSHOTS_MAX").ok().and_then(|x| x.parse().ok()){ cfg.SQUASHR_N_SNAPSHOTS_MAX=Some(v); } if let Some(v)=list_env("SQUASHR_INCLUDE"){ cfg.SQUASHR_INCLUDE=Some(v); } if let Some(v)=list_env("SQUASHR_EXCLUDE"){ cfg.SQUASHR_EXCLUDE=Some(v); } if let Some(v)=list_env("SQUASHR_TRUNCATE"){ cfg.SQUASHR_TRUNCATE=Some(v); } if let Some(v)=bool_env("SQUASHR_COMPRESSION_ENABLE"){ cfg.SQUASHR_COMPRESSION_ENABLE=Some(v); } if let Some(v)=str_env("SQUASHR_COMPRESSION_ALGO"){ cfg.SQUASHR_COMPRESSION_ALGO=Some(v); } if let Some(v)=str_env("SQUASHR_COMPRESSION_ARGS"){ cfg.SQUASHR_COMPRESSION_ARGS=Some(v); } if let Some(v)=bool_env("SQUASHR_CRYPTSETUP_ENABLE"){ cfg.SQUASHR_CRYPTSETUP_ENABLE=Some(v); } if let Some(v)=str_env("SQUASHR_CRYPTSETUP_CREATE_ARGS"){ cfg.SQUASHR_CRYPTSETUP_CREATE_ARGS=Some(v); } if let Some(v)=str_env("SQUASHR_CRYPTSETUP_OPEN_ARGS"){ cfg.SQUASHR_CRYPTSETUP_OPEN_ARGS=Some(v); } if let Some(v)=str_env("SQUASHR_CRYPTSETUP_PASS"){ cfg.SQUASHR_CRYPTSETUP_PASS=Some(v); } if let Some(v)=str_env("SQUASHR_CRYPTSETUP_PASS_FILE"){ cfg.SQUASHR_CRYPTSETUP_PASS_FILE=Some(PathBuf::from(v)); } Ok(cfg) } fn apply_cli_overrides(mut cfg: Config, cli:&Cli)->Result{ if let Some(v)=cli.squashr_root.clone(){ cfg.SQUASHR_ROOT=Some(v); } if let Some(v)=cli.squashr_n_snapshots_min{ cfg.SQUASHR_N_SNAPSHOTS_MIN=Some(v); } if let Some(v)=cli.squashr_n_snapshots_max{ cfg.SQUASHR_N_SNAPSHOTS_MAX=Some(v); } if let Some(v)=cli.squashr_include.as_ref(){ cfg.SQUASHR_INCLUDE=Some(parse_list(v)); } if let Some(v)=cli.squashr_exclude.as_ref(){ cfg.SQUASHR_EXCLUDE=Some(parse_list(v)); } if let Some(v)=cli.squashr_truncate.as_ref(){ cfg.SQUASHR_TRUNCATE=Some(parse_list(v)); } if let Some(v)=cli.squashr_compression_enable{ cfg.SQUASHR_COMPRESSION_ENABLE=Some(v); } if let Some(v)=cli.squashr_compression_algo.clone(){ cfg.SQUASHR_COMPRESSION_ALGO=Some(v); } if let Some(v)=cli.squashr_compression_args.clone(){ cfg.SQUASHR_COMPRESSION_ARGS=Some(v); } if let Some(v)=cli.squashr_cryptsetup_enable{ cfg.SQUASHR_CRYPTSETUP_ENABLE=Some(v); } if let Some(v)=cli.squashr_cryptsetup_create_args.clone(){ cfg.SQUASHR_CRYPTSETUP_CREATE_ARGS=Some(v); } if let Some(v)=cli.squashr_cryptsetup_open_args.clone(){ cfg.SQUASHR_CRYPTSETUP_OPEN_ARGS=Some(v); } if let Some(v)=cli.squashr_cryptsetup_pass.clone(){ cfg.SQUASHR_CRYPTSETUP_PASS=Some(v); } if let Some(v)=cli.squashr_cryptsetup_pass_file.clone(){ cfg.SQUASHR_CRYPTSETUP_PASS_FILE=Some(v); } Ok(cfg) } #[derive(Clone)] struct Ctx { root: PathBuf, state_dir: PathBuf, mounts_dir: PathBuf, work_dir: PathBuf, tar_snapshot: PathBuf, // GNU-tar Snapshot-Datei (materialisiert für tar) min_keep: usize, max_keep: Option, include: Vec, exclude: Vec, truncate: Vec, comp_enable: bool, comp_algo: String, comp_args: Option, luks_enable: bool, luks_create_args: Option, luks_open_args: Option, luks_pass: Option>, verbose: bool, } impl Ctx { fn new(cfg: Config, verbose: bool) -> Result { let root = cfg.SQUASHR_ROOT.ok_or_else(|| anyhow!("SQUASHR_ROOT not set!"))?; let state_dir = root.join("state"); let mounts_dir = root.join("mounts"); let work_dir = root.join("work"); fs::create_dir_all(&state_dir)?; fs::create_dir_all(&mounts_dir)?; fs::create_dir_all(&work_dir)?; let tar_snapshot = state_dir.join("tar.snapshot"); let max_raw = cfg.SQUASHR_N_SNAPSHOTS_MAX.unwrap_or(0); let max_keep = if max_raw == 0 { None } else { Some(max_raw) }; // Passphrase ermitteln (CLI-String hat Vorrang vor Datei) let luks_pass = if let Some(p) = cfg.SQUASHR_CRYPTSETUP_PASS { Some(p.into_bytes()) } else if let Some(f) = cfg.SQUASHR_CRYPTSETUP_PASS_FILE { Some(fs::read(&f).with_context(|| format!("Pass-Datei lesen: {}", f.display()))?) } else { None }; Ok(Self { root, state_dir, mounts_dir, work_dir, tar_snapshot, min_keep: cfg.SQUASHR_N_SNAPSHOTS_MIN.unwrap_or(5), max_keep, include: cfg.SQUASHR_INCLUDE.unwrap_or_default().into_iter().map(PathBuf::from).collect(), exclude: cfg.SQUASHR_EXCLUDE.unwrap_or_default().into_iter().map(PathBuf::from).collect(), truncate: cfg.SQUASHR_TRUNCATE.unwrap_or_default().into_iter().map(PathBuf::from).collect(), comp_enable: cfg.SQUASHR_COMPRESSION_ENABLE.unwrap_or(true), comp_algo: cfg.SQUASHR_COMPRESSION_ALGO.unwrap_or_else(|| "zstd".into()), comp_args: cfg.SQUASHR_COMPRESSION_ARGS, luks_enable: cfg.SQUASHR_CRYPTSETUP_ENABLE.unwrap_or(false), luks_create_args: cfg.SQUASHR_CRYPTSETUP_CREATE_ARGS, luks_open_args: cfg.SQUASHR_CRYPTSETUP_OPEN_ARGS, luks_pass, verbose, }) } fn list_snapshots(&self) -> Result> { let mut entries: Vec<_> = fs::read_dir(&self.state_dir)? .filter_map(|e| e.ok()) .map(|e| e.path()) .filter(|p| p.is_file()) .filter(|p| { p.file_name() .and_then(|n| n.to_str()) .map(|s| s.ends_with(".squashfs") || s.ends_with(".squashfs.luks")) .unwrap_or(false) }) .filter(|p| { // meta-Container NICHT als "Snapshot" zählen let fname = p.file_name().and_then(|s| s.to_str()).unwrap_or(""); fname != "meta.squashfs" && fname != "meta.squashfs.luks" }) .collect(); entries.sort(); Ok(entries) } fn snapshot_path(&self, idx: usize, encrypted: bool) -> PathBuf { let name = format!("{:04}{}", idx, if encrypted { ".squashfs.luks" } else { ".squashfs" }); self.state_dir.join(name) } fn manifest_name(idx: usize) -> String { format!("manifest_{:04}.txt", idx) } fn meta_plain_path(&self) -> PathBuf { self.state_dir.join("meta.squashfs") } fn meta_luks_path(&self) -> PathBuf { self.state_dir.join("meta.squashfs.luks") } fn temp_path(&self, stem: &str) -> PathBuf { let ts = chrono::Local::now().format("%Y%m%d-%H%M%S"); self.work_dir.join(format!("{}_{}", stem, ts)) } } fn main() -> Result<()> { let cli = Cli::parse(); let cfg = load_config(cli.config.as_deref())?; let cfg = apply_env_overrides(cfg)?; let cfg = apply_cli_overrides(cfg, &cli)?; let mut ctx = Ctx::new(cfg, cli.verbose)?; match cli.cmd { Cmd::Backup => cmd_backup(&mut ctx), Cmd::Minimize { n } => cmd_minimize(&mut ctx, n), Cmd::New => cmd_new(&mut ctx), Cmd::Mount { s, target } => cmd_mount(&mut ctx, s, &target), Cmd::Delete { s } => cmd_delete(&mut ctx, s), Cmd::Umount { target } => cmd_umount(&mut ctx, target.as_deref()), } } fn abspath(p: &Path) -> PathBuf { fs::canonicalize(p).unwrap_or_else(|_| p.to_path_buf()) } fn abs_key(p: &Path) -> Result { let abs = fs::canonicalize(p)?; let s = abs.to_string_lossy(); Ok(s.trim_start_matches('/').to_string()) } fn parse_snap_index(p:&Path)->Result{ let fname = p.file_name().and_then(|s| s.to_str()).ok_or_else(||anyhow!("Invalid filename: {}", p.display()))?; // robust: nur führende 4 Ziffern extrahieren let re = Regex::new(r"^(\d{4})").unwrap(); let caps = re.captures(fname).ok_or_else(|| anyhow!("No index found: {}", p.display()))?; Ok(caps.get(1).unwrap().as_str().parse::().unwrap()) } fn shell_split(s:&str)->Vec{ s.split_whitespace().map(|x| x.to_string()).collect() } fn run(cmd:&mut Command, desc:&str)->Result<()>{ let status = cmd.status().with_context(|| format!("Unable to start {}", desc))?; if !status.success(){ bail!("Command failed: {}", desc); } Ok(()) } fn run_ok_status(cmd:&mut Command)->std::io::Result{ cmd.status() } fn run_interactive(cmd:&mut Command, desc:&str)->Result<()>{ cmd.stdin(Stdio::inherit()).stdout(Stdio::inherit()).stderr(Stdio::inherit()); run(cmd, desc) } fn run_with_stdin(cmd:&mut Command, input:&[u8], desc:&str)->Result<()>{ cmd.stdin(Stdio::piped()).stdout(Stdio::inherit()).stderr(Stdio::inherit()); let mut child = cmd.spawn().with_context(|| format!("Unable to start {}", desc))?; { let mut stdin = child.stdin.take().ok_or_else(|| anyhow!("Cannot open stdin for {}", desc))?; stdin.write_all(input)?; } let status = child.wait()?; if !status.success(){ bail!("Command failed: {}", desc); } Ok(()) } fn cryptsetup_run(cmd:&mut Command, pass: Option<&[u8]>, desc:&str)->Result<()>{ if pass.is_some() { cmd.arg("--key-file").arg("-"); cmd.arg("--batch-mode"); run_with_stdin(cmd, pass.unwrap(), desc) } else { run_interactive(cmd, desc) } } fn require_root(op: &str) -> Result<()> { if unsafe { libc::geteuid() } != 0 { bail!("{op} requires root privileges."); } Ok(()) } fn ensure_includes_nonempty(ctx:&Ctx)->Result<()>{ if ctx.include.is_empty(){ bail!("SQUASHR_INCLUDE is empty."); } Ok(()) } fn canonicalize_or_same(p: &Path) -> PathBuf { fs::canonicalize(p).unwrap_or_else(|_| p.to_path_buf()) } fn build_prune_set_for_root(abs_root: &Path, ctx: &Ctx) -> Vec { let mut v = Vec::new(); for ex in &ctx.exclude { let ex_abs = canonicalize_or_same(ex); if ex_abs.starts_with(abs_root) { v.push(ex_abs); } } // stets interne Verzeichnisse ausschließen for auto in [&ctx.state_dir, &ctx.mounts_dir, &ctx.work_dir] { let ex_abs = canonicalize_or_same(auto); if ex_abs.starts_with(abs_root) { v.push(ex_abs); } } v } /// Liefert (top_namespace->abs_root) und Manifest (alle Dateien/Symlinks) relativ zum Archiv-Root: "top/relpath". fn collect_manifest(ctx: &Ctx) -> Result<(HashMap, BTreeSet)> { let mut manifest: BTreeSet = BTreeSet::new(); let mut roots: HashMap = HashMap::new(); for inc in &ctx.include { let abs_root = fs::canonicalize(inc)?; let top = abs_key(&abs_root)?; roots.insert(top.clone(), abs_root.clone()); let prune_set = build_prune_set_for_root(&abs_root, ctx); let iter = WalkDir::new(&abs_root) .follow_links(false) .into_iter() .filter_entry(|e| { let p = e.path(); let skip = prune_set.iter().any(|ex| p.starts_with(ex)); !skip }) .filter_map(|e| e.ok()); for entry in iter { let p = entry.path(); let ft = entry.file_type(); if ft.is_dir() { continue; } let md = fs::symlink_metadata(p)?; if !(md.file_type().is_symlink() || md.is_file()) { continue; } let rel = p .strip_prefix(&abs_root) .unwrap_or(Path::new("")) .to_string_lossy() .to_string(); let name = if rel.is_empty() { top.clone() } else { format!("{}/{}", top, rel) }; manifest.insert(name); } } Ok((roots, manifest)) } /* ---------- TAR Streaming Filter: GNU 'D' dumpdir strip ---------- */ fn is_zero_block(b: &[u8]) -> bool { b.iter().all(|&x| x == 0) } fn parse_tar_size_octal(field: &[u8]) -> u64 { let mut s = 0u64; for &c in field { if c >= b'0' && c <= b'7' { s = (s << 3) + (c - b'0') as u64; } } s } fn forward_tar_strip_gnu_dumpdirs(mut r: R, mut w: W) -> Result<()> { let mut header = [0u8; 512]; let mut buf = vec![0u8; 1024 * 1024]; loop { r.read_exact(&mut header).with_context(|| "read tar header")?; if is_zero_block(&header) { w.write_all(&header)?; r.read_exact(&mut header).with_context(|| "read tar trailing zero")?; w.write_all(&header)?; w.flush()?; break; } let typeflag = header[156] as char; let size = parse_tar_size_octal(&header[124..124+12]); let padded = ((size + 511) / 512) * 512; if typeflag == 'D' { let mut to_skip = padded; while to_skip > 0 { let take = to_skip.min(buf.len() as u64) as usize; r.read_exact(&mut buf[..take])?; to_skip -= take as u64; } continue; } w.write_all(&header)?; let mut remaining = padded as i64; while remaining > 0 { let take = remaining.min(buf.len() as i64) as usize; r.read_exact(&mut buf[..take])?; w.write_all(&buf[..take])?; remaining -= take as i64; } } Ok(()) } /* ---------- Meta-Container (Manifeste + tar.snapshot) ---------- */ fn meta_existing(ctx: &Ctx) -> Option { let luks = ctx.meta_luks_path(); let plain = ctx.meta_plain_path(); if luks.exists() { Some(luks) } else if plain.exists() { Some(plain) } else { None } } fn open_meta_read(ctx:&Ctx) -> Result /*mapper*/)>> { if let Some(img) = meta_existing(ctx) { if img.extension().and_then(|e| e.to_str()) == Some("luks") { require_root("open meta LUKS")?; let mapper = "squashr_meta".to_string(); let mut o = Command::new("cryptsetup"); if let Some(args)=ctx.luks_open_args.as_ref(){ for t in shell_split(args){ o.arg(t);} } o.arg("open").arg(&img).arg(&mapper); cryptsetup_run(&mut o, ctx.luks_pass.as_deref(), "cryptsetup open (meta)")?; Ok(Some((PathBuf::from(format!("/dev/mapper/{mapper}")), Some(mapper)))) } else { Ok(Some((img, None))) } } else { Ok(None) } } fn close_meta_mapper(mapper: Option) { if let Some(m) = mapper { let _ = Command::new("cryptsetup").arg("close").arg(&m).status(); } } fn extract_all_meta_to(ctx:&Ctx, dest:&Path) -> Result<()> { if let Some((src, mapper)) = open_meta_read(ctx)? { fs::create_dir_all(dest)?; let mut cmd = Command::new("unsquashfs"); cmd.arg("-d").arg(dest).arg(&src); run(&mut cmd, "unsquashfs meta")?; close_meta_mapper(mapper); } else { fs::create_dir_all(dest)?; } Ok(()) } fn extract_file_from_meta(ctx:&Ctx, filename:&str, dest_path:&Path) -> Result { if let Some((src, mapper)) = open_meta_read(ctx)? { let tmp = ctx.temp_path("meta.extract"); fs::create_dir_all(&tmp)?; // nur die gewünschte Datei extrahieren let mut cmd = Command::new("unsquashfs"); cmd.arg("-d").arg(&tmp).arg(&src).arg(filename); let st = run_ok_status(&mut cmd).with_context(|| "run unsquashfs")?; let ok = st.success(); let srcf = tmp.join(filename); let mut present = false; if ok && srcf.exists() { fs::create_dir_all(dest_path.parent().unwrap_or_else(|| Path::new(".")))?; fs::rename(&srcf, dest_path)?; present = true; } // aufräumen let _ = fs::remove_dir_all(&tmp); close_meta_mapper(mapper); Ok(present) } else { Ok(false) } } fn cleanup_plain_meta_files(ctx:&Ctx) { // tar.snapshot + alle manifest_* im state_dir entfernen, falls vorhanden let _ = fs::remove_file(&ctx.tar_snapshot); if let Ok(rd) = fs::read_dir(&ctx.state_dir) { for e in rd.flatten() { if let Some(name) = e.file_name().to_str() { if name.starts_with("manifest_") && name.ends_with(".txt") { let _ = fs::remove_file(e.path()); } } } } } fn rebuild_meta_from_staging(ctx:&Ctx, staging:&Path) -> Result<()> { // meta tmp plain let tmp_plain = ctx.temp_path("meta.plain.squashfs"); if tmp_plain.exists() { let _ = fs::remove_file(&tmp_plain); } let mut cmd = Command::new("mksquashfs"); cmd.arg(staging).arg(&tmp_plain).arg("-no-progress").arg("-no-recovery"); if ctx.comp_enable { cmd.arg("-comp").arg(&ctx.comp_algo); if let Some(extra)=ctx.comp_args.as_ref(){ for tok in shell_split(extra){ cmd.arg(tok);} } } run(&mut cmd, "mksquashfs (meta)")?; // Ziel festlegen (verschlüsselt/unkryptiert) let plain = ctx.meta_plain_path(); let luks = ctx.meta_luks_path(); // vorhandene Varianten löschen, um Inkonsistenzen zu vermeiden let _ = fs::remove_file(&plain); let _ = fs::remove_file(&luks); if ctx.luks_enable { encrypt_into_luks(ctx, &tmp_plain, &luks)?; let _ = fs::remove_file(&tmp_plain); } else { fs::rename(&tmp_plain, &plain)?; } // Klartextreste entfernen cleanup_plain_meta_files(ctx); Ok(()) } fn save_manifest_to_dir(dest_dir:&Path, idx: usize, manifest: &BTreeSet) -> Result<()> { fs::create_dir_all(dest_dir)?; let path = dest_dir.join(Ctx::manifest_name(idx)); let mut f = fs::File::create(&path).with_context(|| format!("manifest schreiben: {}", path.display()))?; for line in manifest { writeln!(f, "{line}")?; } Ok(()) } fn load_manifest(ctx:&Ctx, idx: usize) -> Result> { // Primär aus Meta-Container lesen let fname = Ctx::manifest_name(idx); let tmp = ctx.temp_path("manifest.read"); fs::create_dir_all(&tmp)?; let out = tmp.join(&fname); if extract_file_from_meta(ctx, &fname, &out)? { let text = fs::read_to_string(&out) .with_context(|| format!("manifest lesen: {}", out.display()))?; let _ = fs::remove_dir_all(&tmp); return Ok(text.lines().map(|s| s.trim().to_string()).filter(|s| !s.is_empty()).collect()); } // Fallback (falls sehr früher Zustand): Klartextdatei let legacy = ctx.state_dir.join(&fname); if legacy.exists() { let text = fs::read_to_string(&legacy) .with_context(|| format!("manifest lesen: {}", legacy.display()))?; let _ = fs::remove_dir_all(&tmp); return Ok(text.lines().map(|s| s.trim().to_string()).filter(|s| !s.is_empty()).collect()); } let _ = fs::remove_dir_all(&tmp); bail!("manifest {} not found", fname); } /* ---------- TAR → SQFSTAR ---------- */ /// Erzeugt inkrementellen *tar* und streamt ihn (mit Filter) in `sqfstar`. /// Rückgabewert: true, falls tar mit Non-Zero beendet wurde (Warnung). fn build_squash_image_tar_sqfstar( ctx: &Ctx, out: &Path, roots: &HashMap, ) -> Result { if out.exists() { fs::remove_file(out).with_context(|| format!("remove old output: {}", out.display()))?; } // sqfstar: Tar von stdin → SquashFS out let mut sq = Command::new("sqfstar"); if ctx.comp_enable { sq.arg("-comp").arg(&ctx.comp_algo); } sq.arg(out).arg("-"); sq.stdin(Stdio::piped()); sq.stdout(Stdio::inherit()); sq.stderr(Stdio::inherit()); vlog!(ctx, "[backup] sqfstar ← tar (incremental stream, dumpdir-filtered) → {}", out.display()); let mut sq_child = sq.spawn().with_context(|| "Unable to start sqfstar")?; let sq_stdin = sq_child.stdin.take().ok_or_else(|| anyhow!("cannot open sqfstar stdin"))?; // tar: POSIX/PAX + GNU listed-incremental; Pfade relativ zu / let mut tar = Command::new("tar"); tar.arg("--format=posix") .arg("-C").arg("/") .arg("--null") .arg("--files-from=-") .arg("--listed-incremental").arg(&ctx.tar_snapshot) .arg("-cf").arg("-") .arg("--numeric-owner") .arg("--ignore-failed-read"); // Excludes (relativ zu /) let mut all_excludes: Vec = ctx.exclude.clone(); for auto in [&ctx.state_dir, &ctx.mounts_dir, &ctx.work_dir] { all_excludes.push(auto.clone()); } for ex in all_excludes { let ex_rel = ex.to_string_lossy().trim_start_matches('/').to_string(); if !ex_rel.is_empty() { tar.arg(format!("--exclude={}", ex_rel)); } } tar.stdin(Stdio::piped()); tar.stdout(Stdio::piped()); tar.stderr(Stdio::inherit()); let mut tar_child = tar.spawn().with_context(|| "Unable to start tar")?; // Pfadliste: Roots *relativ* zu / (z.B. "usr", "var", "home/lk") { let mut w = std::io::BufWriter::new( tar_child.stdin.take().ok_or_else(|| anyhow!("cannot open tar stdin"))? ); for (_top, abs_root) in roots.iter() { let rel = abs_root.to_string_lossy().trim_start_matches('/').to_string(); let item = if rel.is_empty() { ".".to_string() } else { rel }; w.write_all(item.as_bytes())?; w.write_all(&[0])?; // NUL } w.flush()?; drop(w); } // tar stdout → (Filter: strip dumpdir) → sqfstar stdin { let tar_stdout = tar_child.stdout.take().ok_or_else(|| anyhow!("cannot open tar stdout"))?; forward_tar_strip_gnu_dumpdirs(tar_stdout, sq_stdin)?; } let tar_status = tar_child.wait().with_context(|| "waiting for tar failed")?; let tar_code = tar_status.code().unwrap_or(-1); let tar_nonzero = tar_code != 0; if tar_nonzero { eprintln!("[warn] tar exited with code {tar_code}; continuing (image may be partial)."); } let sq_status = sq_child.wait().with_context(|| "waiting for sqfstar failed")?; if !sq_status.success() { bail!("sqfstar failed with status {}", sq_status); } Ok(tar_nonzero) } fn truncate_logs(ctx:&Ctx)->Result<()>{ if ctx.truncate.is_empty(){ return Ok(()); } for base in &ctx.truncate { if !base.exists(){ continue; } for entry in WalkDir::new(base).follow_links(false).into_iter().filter_map(|e| e.ok()) { let p = entry.path(); if p.is_file() { let name = p.file_name().and_then(|s| s.to_str()).unwrap_or(""); if name.ends_with(".gz") || name.ends_with(".xz") || name.ends_with(".zst") || name.ends_with(".bz2") { let _ = fs::remove_file(p); continue; } let _ = fs::OpenOptions::new().write(true).open(p).and_then(|f| { f.set_len(0)?; Ok(()) }); } } } Ok(()) } fn encrypt_into_luks(ctx:&Ctx, plain:&Path, out_luks:&Path)->Result<()>{ require_root("Creating LUKS container")?; let plain_size = fs::metadata(plain)?.len(); let size = plain_size + 16 * 1024 * 1024; { let f = fs::File::create(out_luks)?; f.set_len(size)?; } vlog!(ctx, "[luks] luksFormat {}", out_luks.display()); let mut c = Command::new("cryptsetup"); if let Some(args)=ctx.luks_create_args.as_ref(){ for t in shell_split(args){ c.arg(t);} } c.arg("luksFormat").arg(out_luks); cryptsetup_run(&mut c, ctx.luks_pass.as_deref(), "cryptsetup luksFormat")?; let mapper = format!("squashr_{}", parse_snap_index(out_luks).unwrap_or(0)); vlog!(ctx, "[luks] open {} as {}", out_luks.display(), mapper); let mut o = Command::new("cryptsetup"); if let Some(args)=ctx.luks_open_args.as_ref(){ for t in shell_split(args){ o.arg(t);} } o.arg("open").arg(out_luks).arg(&mapper); cryptsetup_run(&mut o, ctx.luks_pass.as_deref(), "cryptsetup open")?; let dev = format!("/dev/mapper/{}", mapper); vlog!(ctx, "[luks] dd {} → {}", plain.display(), dev); let mut dd = Command::new("dd"); dd.arg(format!("if={}", plain.display())) .arg(format!("of={}", dev)) .arg("bs=4M").arg("status=none").arg("conv=fsync"); run(&mut dd, "dd")?; vlog!(ctx, "[luks] close {}", mapper); let mut close = Command::new("cryptsetup"); close.arg("close").arg(&mapper); run(&mut close, "cryptsetup close")?; Ok(()) } /* ---------- Mount Helfer / FUSE Fallback ---------- */ fn try_mount_kernel_squashfs(source:&Path, mnt:&Path, use_loop:bool) -> Result { let mut cmd = Command::new("mount"); cmd.arg("-t").arg("squashfs"); if use_loop { cmd.arg("-o").arg("loop,ro"); } else { cmd.arg("-o").arg("ro"); } cmd.arg(source).arg(mnt); let status = cmd.status().with_context(|| "start mount squashfs (kernel)")?; Ok(status.success()) } fn try_mount_fuse_squashfs(source:&Path, mnt:&Path) -> Result { let status = Command::new("squashfuse") .arg("-o").arg("ro") .arg(source) .arg(mnt) .status(); match status { Ok(s) => Ok(s.success()), Err(e) => Err(anyhow!("squashfuse nicht startbar: {e}")), } } /* ---------- Mount-Info Utilities ---------- */ #[derive(Debug)] struct MountEntry { src:String, tgt:PathBuf, fstype:String, opts:String } fn read_proc_mounts() -> Result> { let text = fs::read_to_string("/proc/mounts").context("read /proc/mounts")?; let mut out = Vec::new(); for line in text.lines() { // /proc/mounts: src tgt fstype opts 0 0 let mut it = line.split_whitespace(); let (Some(src), Some(tgt), Some(fstype), Some(opts)) = (it.next(), it.next(), it.next(), it.next()) else { continue }; out.push(MountEntry{ src: src.to_string(), tgt: PathBuf::from(tgt), fstype: fstype.to_string(), opts: opts.to_string(), }); } Ok(out) } fn is_target_mounted(tgt: &Path) -> bool { if let Ok(ms) = read_proc_mounts() { let at = abspath(tgt); ms.into_iter().any(|m| abspath(&m.tgt) == at) } else { false } } fn sort_paths_deep_first(mut v: Vec) -> Vec { v.sort_by_key(|p| std::cmp::Reverse(p.components().count())); v.dedup(); v } fn loop_backing_file(dev: &str) -> Option { // Erwartet "/dev/loopX" let name = Path::new(dev).file_name()?.to_string_lossy().to_string(); let candidates = [ format!("/sys/block/{}/loop/backing_file", name), format!("/sys/devices/virtual/block/{}/loop/backing_file", name), ]; for p in candidates { if let Ok(s) = fs::read_to_string(&p) { let t = s.trim(); if !t.is_empty() { return Some(PathBuf::from(t)); } } } None } /* ---------- Backup ---------- */ fn ensure_tar_snapshot_materialized(ctx:&Ctx) -> Result<()> { // Falls in Meta enthalten → herausziehen, sonst tar lässt neu schreiben let _ = extract_file_from_meta(ctx, "tar.snapshot", &ctx.tar_snapshot)?; Ok(()) } fn cmd_backup(ctx:&mut Ctx)->Result<()>{ ensure_includes_nonempty(ctx)?; truncate_logs(ctx)?; // Vorbereitungen: tar.snapshot bereitstellen ensure_tar_snapshot_materialized(ctx)?; let snaps = ctx.list_snapshots()?; let next_idx = snaps.len() + 1; // 1) Manifest aufnehmen (inkl. Excludes & interne Verzeichnisse) let (roots, manifest_now) = collect_manifest(ctx)?; let manifest_prev = if next_idx > 1 { load_manifest(ctx, next_idx - 1).ok() } else { None }; // 2) Falls keine Änderung: minimalistisches Image erstellen (wie bisher) let no_changes = manifest_prev.as_ref().map_or(false, |m| m == &manifest_now); let plain_img = ctx.temp_path("snapshot.plain.squashfs"); let mut tar_warn = false; if no_changes { vlog!(ctx, "[backup] no changes → creating minimal image {}", plain_img.display()); let empty_src = ctx.temp_path("empty.src"); fs::create_dir_all(&empty_src)?; let mut cmd = Command::new("mksquashfs"); cmd.arg(&empty_src).arg(&plain_img).arg("-no-progress").arg("-no-recovery"); if ctx.comp_enable { cmd.arg("-comp").arg(&ctx.comp_algo); if let Some(extra)=ctx.comp_args.as_ref() { for tok in shell_split(extra){ cmd.arg(tok); } } } run(&mut cmd, "mksquashfs (empty)")?; } else { // 3) tar (listed-incremental) → Filter (strip 'D') → sqfstar tar_warn = build_squash_image_tar_sqfstar(ctx, &plain_img, &roots)?; } // 4) Optional: LUKS-Container schreiben let final_path = if ctx.luks_enable { let out = ctx.snapshot_path(next_idx, true); encrypt_into_luks(ctx, &plain_img, &out)?; fs::remove_file(&plain_img).ok(); out } else { let out = ctx.snapshot_path(next_idx, false); fs::rename(&plain_img, &out)?; out }; // 5) Meta-Container neu bauen (alle Manifeste + tar.snapshot) let staging = ctx.temp_path("meta.staging"); extract_all_meta_to(ctx, &staging)?; // neues Manifest hinein save_manifest_to_dir(&staging, next_idx, &manifest_now)?; // aktualisierte tar.snapshot hinein (falls vorhanden) if ctx.tar_snapshot.exists() { let dst = staging.join("tar.snapshot"); if dst.exists() { let _ = fs::remove_file(&dst); } fs::copy(&ctx.tar_snapshot, &dst)?; } rebuild_meta_from_staging(ctx, &staging)?; let _ = fs::remove_dir_all(&staging); if tar_warn { eprintln!("[warn] Snapshot {:04} created, but tar reported non-zero exit. Some files may be missing or changed during read.", next_idx); } println!("Created new snapshot: {:04}", final_path.display()); rotate_if_needed(ctx)?; Ok(()) } fn rotate_if_needed(ctx:&mut Ctx)->Result<()>{ if let Some(max)=ctx.max_keep { loop { let snaps = ctx.list_snapshots()?; if snaps.len() <= max { break; } merge_first_two(ctx)?; } } Ok(()) } /* ---------- Whiteouts / Mount / Overlay ---------- */ fn create_whiteouts_unlink_list(ctx:&Ctx, upto:usize)->Result>{ if upto == 0 { return Ok(vec![]); } let present = load_manifest(ctx, upto).unwrap_or_default(); let mut union_past: BTreeSet = BTreeSet::new(); for i in 1..upto { let m = load_manifest(ctx, i).unwrap_or_default(); for s in m { union_past.insert(s); } } let deletions: Vec = union_past.difference(&present).cloned().collect(); Ok(deletions) } fn apply_whiteouts_via_unlink(view_root:&Path, deletes:&[String]) -> Result<()> { for rel in deletes { let p = view_root.join(rel); match fs::remove_file(&p) { Ok(_) => {}, Err(e) if e.kind() == ErrorKind::IsADirectory => { let _ = fs::remove_dir(&p); } Err(e) if e.kind() == ErrorKind::NotFound => { /* okay */ } Err(e) => return Err(anyhow!("Unlink failed for {}: {}", p.display(), e)), } } Ok(()) } fn mount_image_ro(ctx:&Ctx, img:&Path, mnt:&Path)->Result<()>{ require_root("Mount SquashFS")?; let mnt_abs = abspath(mnt); if img.extension().and_then(|e| e.to_str()) == Some("luks") { // LUKS → /dev/mapper/ let mapper = format!( "squashr_mount_{}", img.file_stem().and_then(|s| s.to_str()).unwrap_or("img") ); let dev = format!("/dev/mapper/{}", mapper); // evtl. Alt-Mapping schließen let _ = Command::new("cryptsetup").arg("close").arg(&mapper).status(); let mut o = Command::new("cryptsetup"); if let Some(args)=ctx.luks_open_args.as_ref(){ for t in shell_split(args){ o.arg(t);} } o.arg("open").arg(img).arg(&mapper); cryptsetup_run(&mut o, ctx.luks_pass.as_deref(), "cryptsetup open (mount)")?; // 1) Kernel-Mount versuchen match try_mount_kernel_squashfs(Path::new(&dev), &mnt_abs, false) { Ok(true) => return Ok(()), Ok(false) => { eprintln!("[warn] Kernel-SquashFS-Mount fehlgeschlagen – versuche FUSE (squashfuse)."); } Err(e) => { eprintln!("[warn] Kernel-SquashFS-Mount Fehler: {e} – versuche FUSE (squashfuse)."); } } // 2) FUSE-Fallback versuchen match try_mount_fuse_squashfs(Path::new(&dev), &mnt_abs) { Ok(true) => return Ok(()), Ok(false) => { // Mapping sauber schließen let _ = Command::new("cryptsetup").arg("close").arg(&mapper).status(); bail!("FUSE-Mount (squashfuse) fehlgeschlagen. Prüfe Kernelmodul 'squashfs' oder installiere 'squashfuse'."); } Err(e) => { let _ = Command::new("cryptsetup").arg("close").arg(&mapper).status(); bail!("{e}. Prüfe Kernelmodul 'squashfs' oder installiere 'squashfuse'."); } } } else { // Plain SquashFS-Datei match try_mount_kernel_squashfs(img, &mnt_abs, true) { Ok(true) => return Ok(()), Ok(false) => { eprintln!("[warn] Kernel-SquashFS-Mount fehlgeschlagen – versuche FUSE (squashfuse)."); } Err(e) => { eprintln!("[warn] Kernel-SquashFS-Mount Fehler: {e} – versuche FUSE (squashfuse)."); } } match try_mount_fuse_squashfs(img, &mnt_abs) { Ok(true) => Ok(()), Ok(false) => bail!("FUSE-Mount (squashfuse) fehlgeschlagen. Prüfe Kernelmodul 'squashfs' oder installiere 'squashfuse'."), Err(e) => bail!("{e}. Prüfe Kernelmodul 'squashfs' oder installiere 'squashfuse'."), } } } fn mount_overlay(lowerdirs:&str, upper:&Path, work:&Path, target:&Path)->Result<()>{ require_root("Mount OverlayFS")?; let opt = format!( "lowerdir={},upperdir={},workdir={}", lowerdirs, abspath(upper).display(), abspath(work).display() ); let mut cmd = Command::new("mount"); cmd.arg("-t").arg("overlay").arg("overlay") .arg("-o").arg(opt) .arg(abspath(target)); run(&mut cmd, "mount overlay")?; Ok(()) } /* ---------- Umount (robust, idempotent) ---------- */ fn umount(path:&Path)->Result<()>{ require_root("umount")?; // Wenn nicht gemountet → OK (idempotent) if !is_target_mounted(path) { return Ok(()); } // Erst normal versuchen let mut cmd = Command::new("umount"); cmd.arg(path); let status = cmd.status().with_context(|| "umount aufrufen")?; if status.success() { return Ok(()); } // FUSE-Fälle: fusermount3/fusermount let _ = Command::new("fusermount3").arg("-u").arg(path).status(); let status2 = Command::new("fusermount").arg("-u").arg(path).status(); if let Ok(s) = status2 { if s.success() { return Ok(()); } } // Letzter Versuch: lazy let _ = Command::new("umount").arg("-l").arg(path).status(); if !is_target_mounted(path) { return Ok(()); } bail!("umount/fusermount fehlgeschlagen für {}", path.display()); } fn find_snapshot_file(ctx:&Ctx, idx:usize)->Result{ let p1 = ctx.snapshot_path(idx, false); let p2 = ctx.snapshot_path(idx, true); if p1.exists(){ Ok(p1) } else if p2.exists(){ Ok(p2) } else { bail!("Snapshot not found: {}.", idx); } } fn cmd_mount(ctx:&mut Ctx, s:Option, target:&Path)->Result<()>{ require_root("mount")?; fs::create_dir_all(target)?; let snaps = ctx.list_snapshots()?; if snaps.is_empty(){ bail!("No snapshots found."); } let upto = if let Some(n)=s { let max = snaps.len(); if n==0 || n>max { bail!("Snapshot -s {} is invalid (1..={}).", n, max); } n } else { snaps.len() }; if upto == 1 { let img = find_snapshot_file(ctx, 1)?; mount_image_ro(ctx, &img, &abspath(target))?; println!("Mounted to {}.", abspath(target).display()); return Ok(()); } // MULTI-LAYER let mut lowers: Vec = vec![]; for i in 1..=upto { let img = find_snapshot_file(ctx, i)?; let mnt = ctx.mounts_dir.join(format!("snap_{:04}", i)); fs::create_dir_all(&mnt)?; mount_image_ro(ctx, &img, &mnt)?; lowers.push(abspath(&mnt)); } let upper = abspath(&ctx.temp_path("overlay.upper")); let work = abspath(&ctx.temp_path("overlay.work")); fs::create_dir_all(&upper)?; fs::create_dir_all(&work)?; let loweropt = lowers.iter().rev().map(|p| p.display().to_string()).join(":"); // neuester zuerst mount_overlay(&loweropt, &upper, &work, &abspath(target))?; // Whiteouts/Unlinks anhand der Manifeste bis 'upto' let deletes = create_whiteouts_unlink_list(ctx, upto)?; if !deletes.is_empty() { apply_whiteouts_via_unlink(&abspath(target), &deletes)?; } println!("Mounted to {} up to snapshot {:04}.", target.display(), upto); println!("Unmount with: umount {} ; und die snap-Mounts unter {}", target.display(), ctx.mounts_dir.display()); Ok(()) } /* ---------- Delete / New ---------- */ fn cmd_delete(ctx:&mut Ctx, s:usize)->Result<()>{ let path_plain = ctx.snapshot_path(s, false); let path_luks = ctx.snapshot_path(s, true); if !path_plain.exists() && !path_luks.exists() { bail!("Snapshot {} does not exist.", s); } if path_plain.exists(){ fs::remove_file(&path_plain)?; } if path_luks.exists(){ fs::remove_file(&path_luks)?; } // Alle nachfolgenden Snapshots umnummerieren let snaps = ctx.list_snapshots()?; for p in snaps { let n = parse_snap_index(&p)?; if n > s { let is_luks = p.extension().and_then(|e| e.to_str()).unwrap_or("") == "luks"; let newp = ctx.snapshot_path(n-1, is_luks); fs::rename(&p, &newp)?; } } // Meta-Container anpassen: manifest_s löschen, >s dekrementieren let staging = ctx.temp_path("meta.staging"); extract_all_meta_to(ctx, &staging)?; let to_del = staging.join(Ctx::manifest_name(s)); let _ = fs::remove_file(&to_del); // renumber let mut k = s + 1; loop { let from = staging.join(Ctx::manifest_name(k)); if !from.exists() { break; } let to = staging.join(Ctx::manifest_name(k - 1)); if to.exists() { let _ = fs::remove_file(&to); } fs::rename(&from, &to)?; k += 1; } rebuild_meta_from_staging(ctx, &staging)?; let _ = fs::remove_dir_all(&staging); println!("Deleted snapshot {}, decrementing children.", s); Ok(()) } fn cmd_new(ctx:&mut Ctx)->Result<()>{ for s in ctx.list_snapshots()? { fs::remove_file(s)?; } // Meta-Container & Klartextreste löschen let _ = fs::remove_file(ctx.meta_plain_path()); let _ = fs::remove_file(ctx.meta_luks_path()); cleanup_plain_meta_files(ctx); println!("State cleared. Creating new initial snapshot."); cmd_backup(ctx) } /* ---------- Umount Command ---------- */ fn cmd_umount(ctx:&mut Ctx, target: Option<&Path>) -> Result<()> { require_root("umount")?; let mounts_before = read_proc_mounts()?; let mut todo: Vec = Vec::new(); let mut mappers_in_use: HashSet = HashSet::new(); let mut loops_in_use: HashSet = HashSet::new(); if let Some(t) = target { if is_target_mounted(t) { todo.push(abspath(t)); } if let Some(m) = mounts_before.iter().find(|m| abspath(&m.tgt) == abspath(t)) { if m.src.starts_with("/dev/mapper/squashr_") { mappers_in_use.insert(Path::new(&m.src).file_name().unwrap_or_default().to_string_lossy().to_string()); } else if m.src.starts_with("/dev/loop") { loops_in_use.insert(m.src.clone()); } } } else { // 1) Alle Mounts unter mounts_dir for m in &mounts_before { if m.tgt.starts_with(&ctx.mounts_dir) { todo.push(m.tgt.clone()); } } // 2) Overlay-Ziele mit upperdir in work_dir for m in &mounts_before { if m.fstype == "overlay" && m.opts.contains("upperdir=") { if let Some(start) = m.opts.find("upperdir=") { let rest = &m.opts[start + "upperdir=".len()..]; let upper = rest.split(',').next().unwrap_or(""); if upper.starts_with(ctx.work_dir.to_string_lossy().as_ref()) { todo.push(m.tgt.clone()); } } } } // 3) Mapper-SquashFS for m in &mounts_before { if m.fstype == "squashfs" && m.src.starts_with("/dev/mapper/squashr_") { todo.push(m.tgt.clone()); let mapper = Path::new(&m.src).file_name().unwrap_or_default().to_string_lossy().to_string(); mappers_in_use.insert(mapper); } } // 4) Loop-SquashFS aus state_dir for m in &mounts_before { if m.fstype == "squashfs" && m.src.starts_with("/dev/loop") { if let Some(back) = loop_backing_file(&m.src) { if back.starts_with(&ctx.state_dir) { todo.push(m.tgt.clone()); loops_in_use.insert(m.src.clone()); } } } } // 5) FUSE mounts auf state_dir-Quellen oder Mapper for m in &mounts_before { if m.fstype.starts_with("fuse") { let src_path = PathBuf::from(&m.src); if src_path.starts_with(&ctx.state_dir) && src_path.extension().and_then(|e| e.to_str()) == Some("squashfs") { todo.push(m.tgt.clone()); } if m.src.starts_with("/dev/mapper/squashr_") { todo.push(m.tgt.clone()); let mapper = Path::new(&m.src).file_name().unwrap_or_default().to_string_lossy().to_string(); mappers_in_use.insert(mapper); } } } } // Nur aktuell gemountete Ziele behalten let mounted_now: HashSet = read_proc_mounts()? .into_iter() .map(|m| abspath(&m.tgt)) .collect(); todo = todo.into_iter() .map(|p| abspath(&p)) .filter(|p| mounted_now.contains(p)) .collect(); let mut did_something = !todo.is_empty(); let todo = sort_paths_deep_first(todo); // Aushängen let mut errors = Vec::new(); for mpt in &todo { match umount(mpt) { Ok(_) => vlog!(ctx, "[umount] {}", mpt.display()), Err(e) => { eprintln!("[warn] umount {}: {}", mpt.display(), e); errors.push((mpt.clone(), e.to_string())); } } } // Nach dem Aushängen erneut Mounts einlesen let mounts_after = read_proc_mounts().unwrap_or_default(); // LUKS-Mapper schließen (nur, wenn nicht mehr gemountet) for mapper in mappers_in_use { let devpath = format!("/dev/mapper/{mapper}"); let still_mounted = mounts_after.iter().any(|m| m.src == devpath); if !still_mounted { let _ = Command::new("cryptsetup").arg("close").arg(&mapper).status(); vlog!(ctx, "[cryptsetup] close {}", mapper); did_something = true; } else { eprintln!("[warn] Device {mapper} is still in use."); errors.push((PathBuf::from(devpath), "still in use".into())); } } // Loop-Devices lösen for loopdev in loops_in_use { let still_mounted = mounts_after.iter().any(|m| m.src == loopdev); if !still_mounted { if Path::new(&loopdev).exists() { let _ = Command::new("losetup").arg("-d").arg(&loopdev).status(); vlog!(ctx, "[losetup] detach {}", loopdev); did_something = true; } } else { eprintln!("[warn] Loop device {} still in use.", loopdev); errors.push((PathBuf::from(loopdev), "still in use".into())); } } if !did_something && target.is_none() { println!("No SquashR mounts found."); } if errors.is_empty() { println!("All requested mounts unmounted and SquashR devices closed."); Ok(()) } else { bail!( "Some mounts/devices could not be released: {:?}", errors.iter().map(|(p,_)| p.display().to_string()).collect::>() ) } } /* ---------- Minimize ---------- */ fn cmd_minimize(ctx:&mut Ctx, n_opt:Option)->Result<()>{ let target = n_opt.unwrap_or(ctx.min_keep); if target < ctx.min_keep { bail!("n ({}) < SQUASHR_N_SNAPSHOTS_MIN ({}) not allowed.", target, ctx.min_keep); } loop { let snaps = ctx.list_snapshots()?; if snaps.len() <= target { break; } merge_first_two(ctx)?; } println!("Minimized. New snapshot count: {}", ctx.list_snapshots()?.len()); Ok(()) } /* ---------- Merge ---------- */ fn merge_first_two(ctx:&mut Ctx)->Result<()>{ require_root("merge (mounts / overlay)")?; let snaps = ctx.list_snapshots()?; if snaps.len() < 2 { return Ok(()); } let s1 = &snaps[0]; let s2 = &snaps[1]; let idx1 = parse_snap_index(s1)?; let idx2 = parse_snap_index(s2)?; if idx2 != idx1+1 { bail!("Unexpected indices: {:04} + {:04}", idx1, idx2); } let m1 = ctx.mounts_dir.join(format!("merge_{:04}", idx1)); let m2 = ctx.mounts_dir.join(format!("merge_{:04}", idx2)); fs::create_dir_all(&m1)?; fs::create_dir_all(&m2)?; mount_image_ro(ctx, s1, &m1)?; mount_image_ro(ctx, s2, &m2)?; let upper = ctx.temp_path("merge.upper"); let work = ctx.temp_path("merge.work"); let view = ctx.temp_path("merge.view"); fs::create_dir_all(&upper)?; fs::create_dir_all(&work)?; fs::create_dir_all(&view)?; let loweropt = format!("{}:{}", abspath(&m1).display(), abspath(&m2).display()); // s2 über s1 mount_overlay(&loweropt, &upper, &work, &view)?; // Unlinks gemäß manifest_2 (Zielzustand) gegenüber manifest_1 let present = load_manifest(ctx, idx2).unwrap_or_default(); let past = load_manifest(ctx, idx1).unwrap_or_default(); let deletes: Vec = past.difference(&present).cloned().collect(); if !deletes.is_empty() { apply_whiteouts_via_unlink(&abspath(&view), &deletes)?; } // neues SquashFS aus dem View let tmp_plain = ctx.temp_path("merge.plain.sqsh"); let mut cmd = Command::new("mksquashfs"); cmd.arg(&view).arg(&tmp_plain).arg("-no-progress").arg("-no-recovery"); if ctx.comp_enable { cmd.arg("-comp").arg(&ctx.comp_algo); if let Some(extra)=ctx.comp_args.as_ref(){ for tok in shell_split(extra){ cmd.arg(tok);} } } run(&mut cmd, "mksquashfs (merge)")?; let _ = umount(&view); let _ = umount(&m2); let _ = umount(&m1); let is_luks = s1.extension().and_then(|e| e.to_str()).unwrap_or("") == "luks"; let out_path = ctx.snapshot_path(idx1, is_luks); if s1.exists(){ fs::remove_file(s1)?; } if is_luks { encrypt_into_luks(ctx, &tmp_plain, &out_path)?; fs::remove_file(&tmp_plain).ok(); } else { fs::rename(&tmp_plain, &out_path)?; } if s2.exists(){ fs::remove_file(s2)?; } // Indizes > idx2 runterzählen (Dateien) let rest = ctx.list_snapshots()?; for p in rest { let n = parse_snap_index(&p)?; if n > idx2 { let is_luks = p.extension().and_then(|e| e.to_str()).unwrap_or("") == "luks"; let newp = ctx.snapshot_path(n-1, is_luks); fs::rename(&p, &newp)?; } } // Meta-Container anpassen: manifest_1 := manifest_2; danach alle >2 dekrementieren let staging = ctx.temp_path("meta.staging"); extract_all_meta_to(ctx, &staging)?; // manifest_2 -> manifest_1 (overwrite) let m1p = staging.join(Ctx::manifest_name(1)); let m2p = staging.join(Ctx::manifest_name(2)); if m1p.exists(){ let _ = fs::remove_file(&m1p); } if m2p.exists(){ fs::rename(&m2p, &m1p).ok(); } // ab 3 dekrementieren let mut k = 3usize; loop { let from = staging.join(Ctx::manifest_name(k)); if !from.exists() { break; } let to = staging.join(Ctx::manifest_name(k-1)); if to.exists() { let _ = fs::remove_file(&to); } fs::rename(&from, &to)?; k += 1; } rebuild_meta_from_staging(ctx, &staging)?; let _ = fs::remove_dir_all(&staging); println!("Merged snapshots {:04} + {:04} → {:04}.", idx1, idx2, idx1); Ok(()) }