path: root/src/main.rs
author     Leonard Kugis <leonard@kug.is>  2025-10-15 18:19:39 +0200
committer  Leonard Kugis <leonard@kug.is>  2025-10-15 18:19:39 +0200
commit     f7c27b648ced54d4075d77ba3f11d8078d1e73be (patch)
tree       f045ebfff8c0550d1405f3c39c305b13bdcac4c3 /src/main.rs
parent     8070e3852d647ab64142402e2bf09cc5f719b244 (diff)
download   squashr-f7c27b648ced54d4075d77ba3f11d8078d1e73be.tar.gz
Rework to use tar streaming backend
This allows for usage of incremental timestamp functionality of tar.
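Roughly, a backup now corresponds to `tar --format=posix --listed-incremental=state/tar.snapshot -C / -cf - <roots> | sqfstar <image> -`, with GNU dumpdir ('D') entries stripped from the stream in between (see forward_tar_strip_gnu_dumpdirs).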
Diffstat (limited to 'src/main.rs')
-rwxr-xr-x  src/main.rs  1260
1 file changed, 757 insertions, 503 deletions
diff --git a/src/main.rs b/src/main.rs
index 8df82ec..4eb544a 100755
--- a/src/main.rs
+++ b/src/main.rs
@@ -19,17 +19,24 @@ use anyhow::{anyhow, bail, Context, Result};
use clap::{Parser, Subcommand, ValueHint};
use itertools::Itertools;
use regex::Regex;
-use rusqlite::{params, Connection};
use serde::Deserialize;
use std::collections::{BTreeSet, HashMap, HashSet};
use std::env;
use std::fs;
-use std::io::Write;
-use std::os::unix::fs::MetadataExt;
+use std::io::{Read, Write};
+use std::io::ErrorKind;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use walkdir::WalkDir;
-use std::io::ErrorKind;
+
+// small helper macro for verbose logging
+macro_rules! vlog {
+ ($ctx:expr, $($arg:tt)*) => {
+ if $ctx.verbose {
+ eprintln!($($arg)*);
+ }
+ };
+}
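+// Example (illustrative): vlog!(ctx, "[backup] {} entries", n) prints to stderr only when --verbose is set.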
#[derive(Parser, Debug)]
#[command(name="squashr", version, about="Manage backups based on SquashFS, OverlayFS and LUKS")]
@@ -73,6 +80,18 @@ struct Cli {
#[arg(long, allow_hyphen_values = true)]
squashr_cryptsetup_open_args: Option<String>,
+ /// Passphrase for cryptsetup; WARNING: visible in the process listing.
+ #[arg(long)]
+ squashr_cryptsetup_pass: Option<String>,
+
+ /// File containing the passphrase (raw, may be binary); the plain passphrase option takes precedence if both are set.
+ #[arg(long, value_hint=ValueHint::FilePath)]
+ squashr_cryptsetup_pass_file: Option<PathBuf>,
+
+ /// More output, including a listing of the files being backed up
+ #[arg(short, long)]
+ verbose: bool,
+
#[command(subcommand)]
cmd: Cmd,
}
@@ -83,8 +102,9 @@ enum Cmd {
Minimize { #[arg(short = 'n')] n: Option<usize> },
New,
Mount { #[arg(short = 's')] s: Option<usize>, target: PathBuf },
- Umount { target: PathBuf },
Delete { #[arg(short = 's')] s: usize },
+ /// Unmounts SquashR-related mounts (without an argument: all of them; with a path: only that one).
+ Umount { target: Option<PathBuf> },
}
#[derive(Debug, Clone, Deserialize)]
@@ -101,6 +121,8 @@ struct Config {
SQUASHR_CRYPTSETUP_ENABLE: Option<bool>,
SQUASHR_CRYPTSETUP_CREATE_ARGS: Option<String>,
SQUASHR_CRYPTSETUP_OPEN_ARGS: Option<String>,
+ SQUASHR_CRYPTSETUP_PASS: Option<String>,
+ SQUASHR_CRYPTSETUP_PASS_FILE: Option<PathBuf>,
}
impl Default for Config {
@@ -118,6 +140,8 @@ impl Default for Config {
SQUASHR_CRYPTSETUP_ENABLE: Some(false),
SQUASHR_CRYPTSETUP_CREATE_ARGS: Some("--type luks2".to_string()),
SQUASHR_CRYPTSETUP_OPEN_ARGS: Some("--type luks".to_string()),
+ SQUASHR_CRYPTSETUP_PASS: None,
+ SQUASHR_CRYPTSETUP_PASS_FILE: None,
}
}
}
@@ -163,6 +187,8 @@ fn load_config(path: Option<&Path>) -> Result<Config> {
set("SQUASHR_CRYPTSETUP_ENABLE", &mut |v| cfg.SQUASHR_CRYPTSETUP_ENABLE = v.parse().ok());
set("SQUASHR_CRYPTSETUP_CREATE_ARGS", &mut |v| cfg.SQUASHR_CRYPTSETUP_CREATE_ARGS = Some(v));
set("SQUASHR_CRYPTSETUP_OPEN_ARGS", &mut |v| cfg.SQUASHR_CRYPTSETUP_OPEN_ARGS = Some(v));
+ set("SQUASHR_CRYPTSETUP_PASS", &mut |v| cfg.SQUASHR_CRYPTSETUP_PASS = Some(v));
+ set("SQUASHR_CRYPTSETUP_PASS_FILE", &mut |v| cfg.SQUASHR_CRYPTSETUP_PASS_FILE = Some(PathBuf::from(v)));
}
}
Ok(cfg)
@@ -185,6 +211,8 @@ fn apply_env_overrides(mut cfg: Config)->Result<Config>{
if let Some(v)=bool_env("SQUASHR_CRYPTSETUP_ENABLE"){ cfg.SQUASHR_CRYPTSETUP_ENABLE=Some(v); }
if let Some(v)=str_env("SQUASHR_CRYPTSETUP_CREATE_ARGS"){ cfg.SQUASHR_CRYPTSETUP_CREATE_ARGS=Some(v); }
if let Some(v)=str_env("SQUASHR_CRYPTSETUP_OPEN_ARGS"){ cfg.SQUASHR_CRYPTSETUP_OPEN_ARGS=Some(v); }
+ if let Some(v)=str_env("SQUASHR_CRYPTSETUP_PASS"){ cfg.SQUASHR_CRYPTSETUP_PASS=Some(v); }
+ if let Some(v)=str_env("SQUASHR_CRYPTSETUP_PASS_FILE"){ cfg.SQUASHR_CRYPTSETUP_PASS_FILE=Some(PathBuf::from(v)); }
Ok(cfg)
}
@@ -201,27 +229,18 @@ fn apply_cli_overrides(mut cfg: Config, cli:&Cli)->Result<Config>{
if let Some(v)=cli.squashr_cryptsetup_enable{ cfg.SQUASHR_CRYPTSETUP_ENABLE=Some(v); }
if let Some(v)=cli.squashr_cryptsetup_create_args.clone(){ cfg.SQUASHR_CRYPTSETUP_CREATE_ARGS=Some(v); }
if let Some(v)=cli.squashr_cryptsetup_open_args.clone(){ cfg.SQUASHR_CRYPTSETUP_OPEN_ARGS=Some(v); }
+ if let Some(v)=cli.squashr_cryptsetup_pass.clone(){ cfg.SQUASHR_CRYPTSETUP_PASS=Some(v); }
+ if let Some(v)=cli.squashr_cryptsetup_pass_file.clone(){ cfg.SQUASHR_CRYPTSETUP_PASS_FILE=Some(v); }
Ok(cfg)
}
-fn default_config_paths() -> Vec<PathBuf> {
- let mut v = Vec::new();
- if let Ok(xdg) = env::var("XDG_CONFIG_HOME") {
- v.push(PathBuf::from(xdg).join("squashr/squashr.conf"));
- }
- if let Ok(home) = env::var("HOME") {
- v.push(PathBuf::from(home).join(".config/squashr/squashr.conf"));
- }
- v.push(PathBuf::from("/etc/squashr.conf"));
- v
-}
-
#[derive(Clone)]
struct Ctx {
+ root: PathBuf,
state_dir: PathBuf,
mounts_dir: PathBuf,
work_dir: PathBuf,
- index_db: PathBuf,
+ tar_snapshot: PathBuf, // GNU tar snapshot file
min_keep: usize,
max_keep: Option<usize>,
include: Vec<PathBuf>,
@@ -233,10 +252,12 @@ struct Ctx {
luks_enable: bool,
luks_create_args: Option<String>,
luks_open_args: Option<String>,
+ luks_pass: Option<Vec<u8>>,
+ verbose: bool,
}
impl Ctx {
- fn new(cfg: Config) -> Result<Self> {
+ fn new(cfg: Config, verbose: bool) -> Result<Self> {
let root = cfg.SQUASHR_ROOT.ok_or_else(|| anyhow!("SQUASHR_ROOT not set!"))?;
let state_dir = root.join("state");
let mounts_dir = root.join("mounts");
@@ -244,14 +265,26 @@ impl Ctx {
fs::create_dir_all(&state_dir)?;
fs::create_dir_all(&mounts_dir)?;
fs::create_dir_all(&work_dir)?;
- let index_db = state_dir.join("index.sqlite");
+ let tar_snapshot = state_dir.join("tar.snapshot");
+
let max_raw = cfg.SQUASHR_N_SNAPSHOTS_MAX.unwrap_or(0);
let max_keep = if max_raw == 0 { None } else { Some(max_raw) };
+
+ // Determine the passphrase (the passphrase string takes precedence over the file)
+ let luks_pass = if let Some(p) = cfg.SQUASHR_CRYPTSETUP_PASS {
+ Some(p.into_bytes())
+ } else if let Some(f) = cfg.SQUASHR_CRYPTSETUP_PASS_FILE {
+ Some(fs::read(&f).with_context(|| format!("reading passphrase file: {}", f.display()))?)
+ } else {
+ None
+ };
+
Ok(Self {
+ root,
state_dir,
mounts_dir,
work_dir,
- index_db,
+ tar_snapshot,
min_keep: cfg.SQUASHR_N_SNAPSHOTS_MIN.unwrap_or(5),
max_keep,
include: cfg.SQUASHR_INCLUDE.unwrap_or_default().into_iter().map(PathBuf::from).collect(),
@@ -263,6 +296,8 @@ impl Ctx {
luks_enable: cfg.SQUASHR_CRYPTSETUP_ENABLE.unwrap_or(false),
luks_create_args: cfg.SQUASHR_CRYPTSETUP_CREATE_ARGS,
luks_open_args: cfg.SQUASHR_CRYPTSETUP_OPEN_ARGS,
+ luks_pass,
+ verbose,
})
}
@@ -287,6 +322,10 @@ impl Ctx {
self.state_dir.join(name)
}
+ fn manifest_path(&self, idx: usize) -> PathBuf {
+ self.state_dir.join(format!("manifest_{:04}.txt", idx))
+ }
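+ // e.g. idx 1 → <state_dir>/manifest_0001.txt, next to the snapshot images in state/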
+
fn temp_path(&self, stem: &str) -> PathBuf {
let ts = chrono::Local::now().format("%Y%m%d-%H%M%S");
self.work_dir.join(format!("{}_{}", stem, ts))
@@ -295,22 +334,18 @@ impl Ctx {
fn main() -> Result<()> {
let cli = Cli::parse();
- let cfg_path = match &cli.config {
- Some(p) => Some(p.clone()),
- None => default_config_paths().into_iter().find(|p| p.exists()),
- };
let cfg = load_config(cli.config.as_deref())?;
let cfg = apply_env_overrides(cfg)?;
let cfg = apply_cli_overrides(cfg, &cli)?;
- let mut ctx = Ctx::new(cfg)?;
+ let mut ctx = Ctx::new(cfg, cli.verbose)?;
match cli.cmd {
Cmd::Backup => cmd_backup(&mut ctx),
Cmd::Minimize { n } => cmd_minimize(&mut ctx, n),
Cmd::New => cmd_new(&mut ctx),
Cmd::Mount { s, target } => cmd_mount(&mut ctx, s, &target),
- Cmd::Umount { target } => cmd_umount(&mut ctx, &target),
Cmd::Delete { s } => cmd_delete(&mut ctx, s),
+ Cmd::Umount { target } => cmd_umount(&mut ctx, target.as_deref()),
}
}
@@ -325,8 +360,9 @@ fn abs_key(p: &Path) -> Result<String> {
}
fn parse_snap_index(p:&Path)->Result<usize>{
- let fname = p.file_stem().and_then(|s| s.to_str()).ok_or_else(||anyhow!("Invalid filename: {}", p.display()))?;
- let re = Regex::new(r"^(\d{4})$").unwrap();
+ let fname = p.file_name().and_then(|s| s.to_str()).ok_or_else(||anyhow!("Invalid filename: {}", p.display()))?;
+ // robust: extract only the leading 4 digits
+ let re = Regex::new(r"^(\d{4})").unwrap();
let caps = re.captures(fname).ok_or_else(|| anyhow!("No index found: {}", p.display()))?;
Ok(caps.get(1).unwrap().as_str().parse::<usize>().unwrap())
}
@@ -346,6 +382,28 @@ fn run_interactive(cmd:&mut Command, desc:&str)->Result<()>{
run(cmd, desc)
}
+fn run_with_stdin(cmd:&mut Command, input:&[u8], desc:&str)->Result<()>{
+ cmd.stdin(Stdio::piped()).stdout(Stdio::inherit()).stderr(Stdio::inherit());
+ let mut child = cmd.spawn().with_context(|| format!("Unable to start {}", desc))?;
+ {
+ let mut stdin = child.stdin.take().ok_or_else(|| anyhow!("Cannot open stdin for {}", desc))?;
+ stdin.write_all(input)?;
+ }
+ let status = child.wait()?;
+ if !status.success(){ bail!("Command failed: {}", desc); }
+ Ok(())
+}
+
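+// Note (behavior of the helper below): when a passphrase is supplied, cryptsetup gets
+// `--key-file -` and `--batch-mode` and reads the key from stdin; otherwise it runs interactively.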
+fn cryptsetup_run(cmd:&mut Command, pass: Option<&[u8]>, desc:&str)->Result<()>{
+ if pass.is_some() {
+ cmd.arg("--key-file").arg("-");
+ cmd.arg("--batch-mode");
+ run_with_stdin(cmd, pass.unwrap(), desc)
+ } else {
+ run_interactive(cmd, desc)
+ }
+}
+
fn require_root(op: &str) -> Result<()> {
if unsafe { libc::geteuid() } != 0 {
bail!("{op} requires root privileges.");
@@ -358,278 +416,291 @@ fn ensure_includes_nonempty(ctx:&Ctx)->Result<()>{
Ok(())
}
-fn is_excluded(ctx:&Ctx, p:&Path) -> bool {
- let Ok(cp) = fs::canonicalize(p) else { return false; };
+fn canonicalize_or_same(p: &Path) -> PathBuf {
+ fs::canonicalize(p).unwrap_or_else(|_| p.to_path_buf())
+}
+
+fn build_prune_set_for_root(abs_root: &Path, ctx: &Ctx) -> Vec<PathBuf> {
+ let mut v = Vec::new();
for ex in &ctx.exclude {
- if let Ok(cex) = fs::canonicalize(ex) {
- if cp.starts_with(&cex) { return true; }
+ let ex_abs = canonicalize_or_same(ex);
+ if ex_abs.starts_with(abs_root) {
+ v.push(ex_abs);
}
}
- false
+ // always exclude the internal directories
+ for auto in [&ctx.state_dir, &ctx.mounts_dir, &ctx.work_dir] {
+ let ex_abs = canonicalize_or_same(auto);
+ if ex_abs.starts_with(abs_root) { v.push(ex_abs); }
+ }
+ v
}
-fn truncate_logs(ctx:&Ctx)->Result<()>{
- if ctx.truncate.is_empty(){ return Ok(()); }
- for base in &ctx.truncate {
- if !base.exists(){ continue; }
- for entry in WalkDir::new(base).follow_links(false).into_iter().filter_map(|e| e.ok()) {
+/// Returns (top_namespace -> abs_root) and the manifest (all files/symlinks) relative to the archive root: "top/relpath".
+fn collect_manifest(ctx: &Ctx) -> Result<(HashMap<String, PathBuf>, BTreeSet<String>)> {
+ let mut manifest: BTreeSet<String> = BTreeSet::new();
+ let mut roots: HashMap<String, PathBuf> = HashMap::new();
+
+ for inc in &ctx.include {
+ let abs_root = fs::canonicalize(inc)?;
+ let top = abs_key(&abs_root)?;
+ roots.insert(top.clone(), abs_root.clone());
+
+ let prune_set = build_prune_set_for_root(&abs_root, ctx);
+
+ let iter = WalkDir::new(&abs_root)
+ .follow_links(false)
+ .into_iter()
+ .filter_entry(|e| {
+ let p = e.path();
+ let skip = prune_set.iter().any(|ex| p.starts_with(ex));
+ !skip
+ })
+ .filter_map(|e| e.ok());
+
+ for entry in iter {
let p = entry.path();
- if p.is_file() {
- let name = p.file_name().and_then(|s| s.to_str()).unwrap_or("");
- if name.ends_with(".gz") || name.ends_with(".xz") || name.ends_with(".zst") || name.ends_with(".bz2") {
- let _ = fs::remove_file(p);
- continue;
- }
- let _ = fs::OpenOptions::new().write(true).open(p).and_then(|f| { f.set_len(0)?; Ok(()) });
- }
+ let ft = entry.file_type();
+ if ft.is_dir() { continue; }
+
+ let md = fs::symlink_metadata(p)?;
+ if !(md.file_type().is_symlink() || md.is_file()) { continue; }
+
+ let rel = p
+ .strip_prefix(&abs_root)
+ .unwrap_or(Path::new(""))
+ .to_string_lossy()
+ .to_string();
+
+ let name = if rel.is_empty() { top.clone() } else { format!("{}/{}", top, rel) };
+ manifest.insert(name);
}
}
- Ok(())
+
+ Ok((roots, manifest))
}
-fn db_open_init(ctx:&Ctx) -> Result<Connection> {
- let must_init = !ctx.index_db.exists();
- let conn = Connection::open(&ctx.index_db)
- .with_context(|| format!("Opening index.sqlite: {}", ctx.index_db.display()))?;
- conn.pragma_update(None, "journal_mode", "WAL")?;
- conn.pragma_update(None, "synchronous", "NORMAL")?;
- conn.execute_batch("PRAGMA foreign_keys=ON;")?;
-
- if must_init {
- conn.execute_batch(r#"
- CREATE TABLE IF NOT EXISTS snapshots(
- id INTEGER PRIMARY KEY,
- created_at TEXT NOT NULL
- );
- CREATE TABLE IF NOT EXISTS roots(
- id INTEGER PRIMARY KEY,
- top_name TEXT UNIQUE NOT NULL,
- abs_path TEXT NOT NULL
- );
- CREATE TABLE IF NOT EXISTS files(
- id INTEGER PRIMARY KEY,
- root_id INTEGER NOT NULL,
- rel_path TEXT NOT NULL,
- kind TEXT NOT NULL,
- UNIQUE(root_id, rel_path),
- FOREIGN KEY(root_id) REFERENCES roots(id) ON DELETE CASCADE
- );
- CREATE TABLE IF NOT EXISTS events(
- snapshot_id INTEGER NOT NULL,
- file_id INTEGER NOT NULL,
- change CHAR NOT NULL CHECK(change IN ('c','m','d')),
- size INTEGER,
- mtime INTEGER,
- checksum TEXT,
- PRIMARY KEY(snapshot_id, file_id),
- FOREIGN KEY(snapshot_id) REFERENCES snapshots(id) ON DELETE CASCADE,
- FOREIGN KEY(file_id) REFERENCES files(id) ON DELETE CASCADE
- );
- "#)?;
+fn save_manifest(ctx: &Ctx, idx: usize, manifest: &BTreeSet<String>) -> Result<()> {
+ let path = ctx.manifest_path(idx);
+ let mut f = fs::File::create(&path).with_context(|| format!("writing manifest: {}", path.display()))?;
+ for line in manifest {
+ writeln!(f, "{line}")?;
}
- Ok(conn)
+ Ok(())
}
-fn ensure_root(conn:&Connection, include_abs:&Path) -> Result<(i64,String,PathBuf)> {
- let top = abs_key(include_abs)?;
- conn.execute("INSERT OR IGNORE INTO roots(top_name,abs_path) VALUES(?,?)",
- params![&top, include_abs.to_string_lossy()])?;
- let id = conn.query_row("SELECT id FROM roots WHERE top_name=?", params![&top], |r| r.get(0))?;
- Ok((id, top, include_abs.to_path_buf()))
+fn load_manifest(ctx:&Ctx, idx: usize) -> Result<BTreeSet<String>> {
+ let path = ctx.manifest_path(idx);
+ let text = fs::read_to_string(&path)
+ .with_context(|| format!("reading manifest: {}", path.display()))?;
+ Ok(text.lines().map(|s| s.trim().to_string()).filter(|s| !s.is_empty()).collect())
}
-fn ensure_file_row(conn:&Connection, root_id:i64, rel:&str, kind:char) -> Result<i64> {
- conn.execute("INSERT OR IGNORE INTO files(root_id,rel_path,kind) VALUES(?,?,?)",
- params![root_id, rel, kind.to_string()])?;
- Ok(conn.query_row("SELECT id FROM files WHERE root_id=? AND rel_path=?",
- params![root_id, rel], |r| r.get(0))?)
+/* ---------- Tar streaming filter: strip only GNU dumpdir ('D') entries ---------- */
+
+fn is_zero_block(b: &[u8]) -> bool {
+ b.iter().all(|&x| x == 0)
}
-fn prev_last_events(conn:&Connection) -> Result<HashMap<(String,String),(String,i64,i64)>> {
- let mut map = HashMap::new();
- let sql = r#"
- SELECT r.top_name, f.rel_path, e.change, e.size, e.mtime
- FROM files f
- JOIN roots r ON r.id=f.root_id
- JOIN events e ON e.file_id=f.id
- WHERE e.snapshot_id = (SELECT MAX(e2.snapshot_id) FROM events e2 WHERE e2.file_id=f.id)
- "#;
- let mut stmt = conn.prepare(sql)?;
- let rows = stmt.query_map([], |row| {
- Ok((
- row.get::<_,String>(0)?,
- row.get::<_,String>(1)?,
- row.get::<_,String>(2)?,
- row.get::<_,i64>(3)?,
- row.get::<_,i64>(4)?,
- ))
- })?;
- for r in rows {
- let (top, rel, ch, sz, mt) = r?;
- map.insert((top, rel), (ch, sz, mt));
+// Note: only classic octal size fields are handled (sufficient in practice).
+fn parse_tar_size_octal(field: &[u8]) -> u64 {
+ let mut s = 0u64;
+ for &c in field {
+ if c >= b'0' && c <= b'7' {
+ s = (s << 3) + (c - b'0') as u64;
+ }
}
- Ok(map)
+ s
}
-fn deletions_up_to(conn:&Connection, upto:i64) -> Result<Vec<(String,String)>> {
- let mut out = vec![];
- let sql = r#"
- SELECT r.top_name, f.rel_path
- FROM files f
- JOIN roots r ON r.id=f.root_id
- JOIN events e ON e.file_id=f.id
- WHERE e.snapshot_id = (
- SELECT MAX(e2.snapshot_id) FROM events e2 WHERE e2.file_id=f.id AND e2.snapshot_id <= ?
- )
- AND e.change='d'
- "#;
- let mut stmt = conn.prepare(sql)?;
- let rows = stmt.query_map(params![upto], |row| Ok((row.get::<_,String>(0)?, row.get::<_,String>(1)?)))?;
- for r in rows { out.push(r?); }
- Ok(out)
-}
+/// Filters a tar stream:
+/// - **skips GNU incremental dumpdirs** (typeflag 'D')
+/// - all other headers/data are passed through unchanged.
+fn forward_tar_strip_gnu_dumpdirs<R: Read, W: Write>(mut r: R, mut w: W) -> Result<()> {
+ let mut header = [0u8; 512];
+ let mut buf = vec![0u8; 1024 * 1024]; // 1 MiB buffer
+
+ loop {
+ r.read_exact(&mut header).with_context(|| "read tar header")?;
+
+ if is_zero_block(&header) {
+ // end of archive: forward the two trailing zero blocks
+ w.write_all(&header)?;
+ r.read_exact(&mut header).with_context(|| "read tar trailing zero")?;
+ w.write_all(&header)?;
+ w.flush()?;
+ break;
+ }
-#[derive(Debug, Clone)]
-struct FsMeta { size:u64, mtime:i64 }
+ let typeflag = header[156] as char;
+ let size = parse_tar_size_octal(&header[124..124+12]);
+ let padded = ((size + 511) / 512) * 512;
+
+ if typeflag == 'D' {
+ // GNU dumpdir: skip the data, do NOT write it
+ let mut to_skip = padded;
+ while to_skip > 0 {
+ let take = to_skip.min(buf.len() as u64) as usize;
+ r.read_exact(&mut buf[..take])?;
+ to_skip -= take as u64;
+ }
+ continue;
+ }
-fn scan_current(ctx:&Ctx, conn:&Connection) -> Result<(HashMap<(String,String),FsMeta>, HashMap<String,(i64,PathBuf)>)> {
- let mut now_map = HashMap::new();
- let mut roots = HashMap::new();
- for inc in &ctx.include {
- let inc_abs = fs::canonicalize(inc)?;
- let (root_id, top_name, abs_root) = ensure_root(conn, &inc_abs)?;
- roots.insert(top_name.clone(), (root_id, abs_root.clone()));
- for e in WalkDir::new(&abs_root).follow_links(false).into_iter().filter_map(|e| e.ok()) {
- let p = e.path();
- if is_excluded(ctx, p) { continue; }
- let md = fs::symlink_metadata(p)?;
- if md.is_dir() { continue; }
- if !(md.file_type().is_symlink() || md.is_file()) { continue; }
- let rel = p.strip_prefix(&abs_root).unwrap_or(Path::new("")).to_string_lossy().to_string();
- now_map.insert((top_name.clone(), rel), FsMeta{ size: md.len(), mtime: md.mtime() });
+ // regular entry → forward header + data
+ w.write_all(&header)?;
+ let mut remaining = padded as i64;
+ while remaining > 0 {
+ let take = remaining.min(buf.len() as i64) as usize;
+ r.read_exact(&mut buf[..take])?;
+ w.write_all(&buf[..take])?;
+ remaining -= take as i64;
}
}
- Ok((now_map, roots))
+
+ Ok(())
}
-fn build_squash_image_pf(
+/// Creates an incremental *tar* and streams it (through the filter) into `sqfstar`.
+/// Return value: true if tar exited non-zero (warning).
+fn build_squash_image_tar_sqfstar(
ctx: &Ctx,
out: &Path,
- changed: &[(String, String)],
- roots: &HashMap<String, (i64, PathBuf)>,
-) -> Result<()> {
- use std::collections::BTreeSet;
-
- let mut dirset: BTreeSet<String> = BTreeSet::new();
- for (top_ns, rel) in changed {
- let img_path = if rel.is_empty() {
- top_ns.clone()
- } else {
- format!("{}/{}", top_ns, rel)
- };
- let mut cur = Path::new(&img_path);
- while let Some(p) = cur.parent() {
- let s = p.to_string_lossy();
- if !s.is_empty() {
- dirset.insert(s.to_string());
- }
- if p.as_os_str().is_empty() { break; }
- cur = p;
- }
+ roots: &HashMap<String, PathBuf>,
+) -> Result<bool /* tar_nonzero */> {
+ if out.exists() {
+ fs::remove_file(out).with_context(|| format!("remove old output: {}", out.display()))?;
}
- let pf = ctx.temp_path("filelist.pf");
- {
- let mut f = fs::File::create(&pf)?;
- writeln!(f, "/ d 0755 0 0")?;
-
- let mut dirs: Vec<String> = dirset.into_iter().collect();
- dirs.sort_by_key(|d| Path::new(d).components().count());
- for d in &dirs {
- writeln!(f, "{} d 0755 0 0", d)?;
+ // sqfstar: tar from stdin → SquashFS file `out`
+ let mut sq = Command::new("sqfstar");
+ if ctx.comp_enable {
+ sq.arg("-comp").arg(&ctx.comp_algo);
+ }
+ sq.arg(out).arg("-");
+ sq.stdin(Stdio::piped());
+ sq.stdout(Stdio::inherit());
+ sq.stderr(Stdio::inherit());
+
+ vlog!(ctx, "[backup] sqfstar ← tar (incremental stream, dumpdir-filtered) → {}", out.display());
+ let mut sq_child = sq.spawn().with_context(|| "Unable to start sqfstar")?;
+ let sq_stdin = sq_child.stdin.take().ok_or_else(|| anyhow!("cannot open sqfstar stdin"))?;
+
+ // tar: POSIX/PAX + GNU listed-incremental; paths relative to /
+ let mut tar = Command::new("tar");
+ tar.arg("--format=posix")
+ .arg("-C").arg("/")
+ .arg("--null")
+ .arg("--files-from=-")
+ .arg("--listed-incremental").arg(&ctx.tar_snapshot)
+ .arg("-cf").arg("-")
+ .arg("--numeric-owner")
+ .arg("--ignore-failed-read");
+
+ // excludes (relative to /)
+ let mut all_excludes: Vec<PathBuf> = ctx.exclude.clone();
+ for auto in [&ctx.state_dir, &ctx.mounts_dir, &ctx.work_dir] {
+ all_excludes.push(auto.clone());
+ }
+ for ex in all_excludes {
+ let ex_rel = ex.to_string_lossy().trim_start_matches('/').to_string();
+ if !ex_rel.is_empty() {
+ tar.arg(format!("--exclude={}", ex_rel));
}
}
+ tar.stdin(Stdio::piped());
+ tar.stdout(Stdio::piped());
+ tar.stderr(Stdio::inherit());
+ let mut tar_child = tar.spawn().with_context(|| "Unable to start tar")?;
+
+ // path list: roots *relative* to / (e.g. "usr", "var", "home/lk")
{
- let mut f = fs::OpenOptions::new().append(true).open(&pf)?;
- for (top_ns, rel) in changed {
- let (_id, abs_root) = roots.get(top_ns)
- .ok_or_else(|| anyhow!("unknown root {}", top_ns))?;
- let src = if rel.is_empty() { abs_root.clone() } else { abs_root.join(rel) };
- let md = fs::symlink_metadata(&src)?;
-
- let img_path = if rel.is_empty() { top_ns.clone() } else { format!("{}/{}", top_ns, rel) };
-
- if md.file_type().is_symlink() {
- // Symlink: Ziel lesen und als 's' schreiben
- let target = fs::read_link(&src)?.to_string_lossy().to_string();
- writeln!(f, "{} s 0777 0 0 {}", img_path, escape_pf(&target))?;
- } else if md.is_file() {
- writeln!(f, "{} l {}", img_path, escape_pf(&src.to_string_lossy()))?;
- }
+ let mut w = std::io::BufWriter::new(
+ tar_child.stdin.take().ok_or_else(|| anyhow!("cannot open tar stdin"))?
+ );
+ for (_top, abs_root) in roots.iter() {
+ let rel = abs_root.to_string_lossy().trim_start_matches('/').to_string();
+ let item = if rel.is_empty() { ".".to_string() } else { rel };
+ w.write_all(item.as_bytes())?;
+ w.write_all(&[0])?; // NUL
}
+ w.flush()?;
+ drop(w);
}
- let empty_src = ctx.temp_path("empty.src");
- fs::create_dir_all(&empty_src)?;
+ // tar stdout → (Filter: strip dumpdir) → sqfstar stdin
+ {
+ let tar_stdout = tar_child.stdout.take().ok_or_else(|| anyhow!("cannot open tar stdout"))?;
+ forward_tar_strip_gnu_dumpdirs(tar_stdout, sq_stdin)?;
+ }
- let mut cmd = Command::new("mksquashfs");
- cmd.arg(&empty_src)
- .arg(out)
- .arg("-no-progress")
- .arg("-no-recovery")
- .arg("-always-use-fragments")
- .arg("-pf").arg(&pf);
+ let tar_status = tar_child.wait().with_context(|| "waiting for tar failed")?;
+ let tar_code = tar_status.code().unwrap_or(-1);
+ let tar_nonzero = tar_code != 0;
+ if tar_nonzero {
+ eprintln!("[warn] tar exited with code {tar_code}; continuing (image may be partial).");
+ }
- if ctx.comp_enable {
- cmd.arg("-comp").arg(&ctx.comp_algo);
- if let Some(extra) = ctx.comp_args.as_ref() {
- for tok in shell_split(extra) { cmd.arg(tok); }
- }
- } else {
- cmd.arg("-noI").arg("-noD").arg("-noF");
+ let sq_status = sq_child.wait().with_context(|| "waiting for sqfstar failed")?;
+ if !sq_status.success() {
+ bail!("sqfstar failed with status {}", sq_status);
}
- run(&mut cmd, "mksquashfs (-pf)")?;
- Ok(())
+ Ok(tar_nonzero)
}
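+// Note: GNU tar conventionally exits with 1 when some files changed while being read and
+// with 2 on fatal errors; both cases surface as the tar_nonzero warning above instead of aborting.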
-fn escape_pf<S: AsRef<str>>(s: S) -> String {
- let s = s.as_ref();
- if s.chars().any(|c| c.is_whitespace() || c == '"') {
- let escaped = s.replace('\\', "\\\\").replace('"', "\\\"");
- format!("\"{}\"", escaped)
- } else {
- s.to_string()
+fn truncate_logs(ctx:&Ctx)->Result<()>{
+ if ctx.truncate.is_empty(){ return Ok(()); }
+ for base in &ctx.truncate {
+ if !base.exists(){ continue; }
+ for entry in WalkDir::new(base).follow_links(false).into_iter().filter_map(|e| e.ok()) {
+ let p = entry.path();
+ if p.is_file() {
+ let name = p.file_name().and_then(|s| s.to_str()).unwrap_or("");
+ if name.ends_with(".gz") || name.ends_with(".xz") || name.ends_with(".zst") || name.ends_with(".bz2") {
+ let _ = fs::remove_file(p);
+ continue;
+ }
+ let _ = fs::OpenOptions::new().write(true).open(p).and_then(|f| { f.set_len(0)?; Ok(()) });
+ }
+ }
}
+ Ok(())
}
fn encrypt_into_luks(ctx:&Ctx, plain:&Path, out_luks:&Path)->Result<()>{
- require_root("Creatintg LUKS container")?;
+ require_root("Creating LUKS container")?;
+
let plain_size = fs::metadata(plain)?.len();
let size = plain_size + 16 * 1024 * 1024;
{
let f = fs::File::create(out_luks)?;
f.set_len(size)?;
}
+
+ vlog!(ctx, "[luks] luksFormat {}", out_luks.display());
let mut c = Command::new("cryptsetup");
if let Some(args)=ctx.luks_create_args.as_ref(){ for t in shell_split(args){ c.arg(t);} }
c.arg("luksFormat").arg(out_luks);
- run_interactive(&mut c, "cryptsetup luksFormat")?;
+ cryptsetup_run(&mut c, ctx.luks_pass.as_deref(), "cryptsetup luksFormat")?;
let mapper = format!("squashr_{}", parse_snap_index(out_luks).unwrap_or(0));
+ vlog!(ctx, "[luks] open {} as {}", out_luks.display(), mapper);
let mut o = Command::new("cryptsetup");
if let Some(args)=ctx.luks_open_args.as_ref(){ for t in shell_split(args){ o.arg(t);} }
o.arg("open").arg(out_luks).arg(&mapper);
- run_interactive(&mut o, "cryptsetup open")?;
+ cryptsetup_run(&mut o, ctx.luks_pass.as_deref(), "cryptsetup open")?;
let dev = format!("/dev/mapper/{}", mapper);
+ vlog!(ctx, "[luks] dd {} → {}", plain.display(), dev);
let mut dd = Command::new("dd");
dd.arg(format!("if={}", plain.display()))
.arg(format!("of={}", dev))
.arg("bs=4M").arg("status=none").arg("conv=fsync");
run(&mut dd, "dd")?;
+ vlog!(ctx, "[luks] close {}", mapper);
let mut close = Command::new("cryptsetup");
close.arg("close").arg(&mapper);
run(&mut close, "cryptsetup close")?;
@@ -637,43 +708,105 @@ fn encrypt_into_luks(ctx:&Ctx, plain:&Path, out_luks:&Path)->Result<()>{
Ok(())
}
+/* ---------- Mount helpers: kernel → FUSE fallback ---------- */
+
+fn try_mount_kernel_squashfs(source:&Path, mnt:&Path, use_loop:bool) -> Result<bool> {
+ let mut cmd = Command::new("mount");
+ cmd.arg("-t").arg("squashfs");
+ if use_loop { cmd.arg("-o").arg("loop,ro"); } else { cmd.arg("-o").arg("ro"); }
+ cmd.arg(source).arg(mnt);
+ let status = cmd.status().with_context(|| "start mount squashfs (kernel)")?;
+ Ok(status.success())
+}
+
+fn try_mount_fuse_squashfs(source:&Path, mnt:&Path) -> Result<bool> {
+ let status = Command::new("squashfuse")
+ .arg("-o").arg("ro")
+ .arg(source)
+ .arg(mnt)
+ .status();
+
+ match status {
+ Ok(s) => Ok(s.success()),
+ Err(e) => Err(anyhow!("squashfuse could not be started: {e}")),
+ }
+}
+
+/* ---------- Mount-Info Utilities ---------- */
+
+#[derive(Debug)]
+struct MountEntry { src:String, tgt:PathBuf, fstype:String, opts:String }
+
+fn read_proc_mounts() -> Result<Vec<MountEntry>> {
+ let text = fs::read_to_string("/proc/mounts").context("read /proc/mounts")?;
+ let mut out = Vec::new();
+ for line in text.lines() {
+ // /proc/mounts: src tgt fstype opts 0 0
+ let mut it = line.split_whitespace();
+ let (Some(src), Some(tgt), Some(fstype), Some(opts)) = (it.next(), it.next(), it.next(), it.next()) else { continue };
+ out.push(MountEntry{
+ src: src.to_string(),
+ tgt: PathBuf::from(tgt),
+ fstype: fstype.to_string(),
+ opts: opts.to_string(),
+ });
+ }
+ Ok(out)
+}
+
+fn is_target_mounted(tgt: &Path) -> bool {
+ if let Ok(ms) = read_proc_mounts() {
+ let at = abspath(tgt);
+ ms.into_iter().any(|m| abspath(&m.tgt) == at)
+ } else {
+ false
+ }
+}
+
+fn sort_paths_deep_first(mut v: Vec<PathBuf>) -> Vec<PathBuf> {
+ // deepest paths first; ties broken lexicographically so identical paths end up
+ // adjacent and dedup() can actually drop them
+ v.sort_by(|a, b| b.components().count().cmp(&a.components().count()).then_with(|| a.cmp(b)));
+ v.dedup();
+ v
+}
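+// e.g. [/a, /a/b/c, /a/b] → [/a/b/c, /a/b, /a], so nested mounts are released before their parents.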
+
+fn loop_backing_file(dev: &str) -> Option<PathBuf> {
+ // expects "/dev/loopX"
+ let name = Path::new(dev).file_name()?.to_string_lossy().to_string();
+ let candidates = [
+ format!("/sys/block/{}/loop/backing_file", name),
+ format!("/sys/devices/virtual/block/{}/loop/backing_file", name),
+ ];
+ for p in candidates {
+ if let Ok(s) = fs::read_to_string(&p) {
+ let t = s.trim();
+ if !t.is_empty() { return Some(PathBuf::from(t)); }
+ }
+ }
+ None
+}
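+// e.g. for /dev/loop0 this reads /sys/block/loop0/loop/backing_file, which holds the
+// path of the attached image (paths here are illustrative).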
+
+/* ---------- Backup ---------- */
+
fn cmd_backup(ctx:&mut Ctx)->Result<()>{
ensure_includes_nonempty(ctx)?;
truncate_logs(ctx)?;
- let mut conn = db_open_init(ctx)?;
- let tx = conn.transaction()?;
- let last_id: i64 = tx.query_row("SELECT IFNULL(MAX(id),0) FROM snapshots", [], |r| r.get(0))?;
- let snap_id = last_id + 1;
- tx.execute("INSERT INTO snapshots(id,created_at) VALUES(?,datetime('now'))", params![snap_id])?;
-
- let (now_map, roots) = scan_current(ctx, &tx)?;
- let prev_map = prev_last_events(&tx)?;
-
- let mut changed: Vec<(String,String)> = vec![];
- for ((top, rel), meta) in &now_map {
- match prev_map.get(&(top.clone(), rel.clone())) {
- None => changed.push((top.clone(), rel.clone())),
- Some((ch, psz, pmt)) => {
- if ch == "d" || *psz as u64 != meta.size || *pmt as i64 != meta.mtime {
- changed.push((top.clone(), rel.clone()));
- }
- }
- }
- }
+ let snaps = ctx.list_snapshots()?;
+ let next_idx = snaps.len() + 1;
- let prev_present: HashSet<(String,String)> = prev_map.iter()
- .filter(|(_k,(ch,_,_))| ch != "d")
- .map(|(k,_v)| k.clone()).collect();
- let now_keys: HashSet<(String,String)> = now_map.keys().cloned().collect();
- let deleted: Vec<(String,String)> = prev_present.difference(&now_keys).cloned().collect();
+ // 1) Record the manifest (honoring excludes & the internal directories)
+ let (roots, manifest_now) = collect_manifest(ctx)?;
+ let manifest_prev = if next_idx > 1 {
+ load_manifest(ctx, next_idx - 1).ok()
+ } else { None };
- // SquashFS deterministisch via -pf erzeugen
- let idx = snap_id as usize;
+ // 2) If nothing changed: create a minimal image (as before)
+ let no_changes = manifest_prev.as_ref().map_or(false, |m| m == &manifest_now);
let plain_img = ctx.temp_path("snapshot.plain.squashfs");
- if changed.is_empty() {
- // Leeres Delta → minimales Image
+ let mut tar_warn = false;
+ if no_changes {
+ vlog!(ctx, "[backup] no changes → creating minimal image {}", plain_img.display());
let empty_src = ctx.temp_path("empty.src");
fs::create_dir_all(&empty_src)?;
let mut cmd = Command::new("mksquashfs");
@@ -684,42 +817,29 @@ fn cmd_backup(ctx:&mut Ctx)->Result<()>{
}
run(&mut cmd, "mksquashfs (empty)")?;
} else {
- build_squash_image_pf(ctx, &plain_img, &changed, &roots)?;
+ // 3) tar (listed-incremental) → Filter (strip 'D') → sqfstar
+ tar_warn = build_squash_image_tar_sqfstar(ctx, &plain_img, &roots)?;
}
+ // 4) Optionally: write the LUKS container
let final_path = if ctx.luks_enable {
- let out = ctx.snapshot_path(idx, true);
+ let out = ctx.snapshot_path(next_idx, true);
encrypt_into_luks(ctx, &plain_img, &out)?;
fs::remove_file(&plain_img).ok();
out
} else {
- let out = ctx.snapshot_path(idx, false);
+ let out = ctx.snapshot_path(next_idx, false);
fs::rename(&plain_img, &out)?;
out
};
- for (top, rel) in &changed {
- let (root_id, abs_root) = roots.get(top).unwrap();
- let real = if rel.is_empty(){ abs_root.clone() } else { abs_root.join(rel) };
- let md = fs::symlink_metadata(&real)?;
- let kind = if md.file_type().is_symlink() { 'l' } else { 'f' };
- let fid = ensure_file_row(&tx, *root_id, rel, kind)?;
- let ev = match prev_map.get(&(top.clone(), rel.clone())) {
- None => "c",
- Some((ch,_,_)) if ch == "d" => "c",
- _ =>"m"
- };
- tx.execute("INSERT OR REPLACE INTO events(snapshot_id,file_id,change,size,mtime,checksum) VALUES(?,?,?,?,?,NULL)",
- params![snap_id, fid, ev, md.len() as i64, md.mtime()])?;
- }
- for (top, rel) in &deleted {
- let (root_id, _abs) = roots.get(top).unwrap();
- let fid = ensure_file_row(&tx, *root_id, rel, 'f')?;
- tx.execute("INSERT OR REPLACE INTO events(snapshot_id,file_id,change,size,mtime,checksum) VALUES(?,?,?,?,?,NULL)",
- params![snap_id, fid, "d", 0i64, 0i64])?;
+ // 5) Save the manifest
+ save_manifest(ctx, next_idx, &manifest_now)?;
+
+ if tar_warn {
+ eprintln!("[warn] Snapshot {:04} created, but tar reported non-zero exit. Some files may be missing or changed during read.", next_idx);
}
- tx.commit()?;
println!("Created new snapshot: {:04}", final_path.display());
rotate_if_needed(ctx)?;
@@ -737,115 +857,31 @@ fn rotate_if_needed(ctx:&mut Ctx)->Result<()>{
Ok(())
}
-fn present_state_up_to(conn:&Connection, upto:i64) -> Result<Vec<(String,String,i64,i64,String)>> {
- let present_sql = r#"
- SELECT r.top_name, f.rel_path, e.size, e.mtime, f.kind
- FROM files f
- JOIN roots r ON r.id=f.root_id
- JOIN events e ON e.file_id=f.id
- WHERE e.snapshot_id = (
- SELECT MAX(e2.snapshot_id) FROM events e2 WHERE e2.file_id=f.id AND e2.snapshot_id <= ?
- ) AND e.change!='d'
- "#;
- let mut stmt = conn.prepare(present_sql)?;
- let rows = stmt.query_map(params![upto], |r| Ok((r.get(0)?, r.get(1)?, r.get(2)?, r.get(3)?, r.get(4)?)))?;
- let mut v = vec![];
- for r in rows { v.push(r?); }
- Ok(v)
-}
-
-fn merge_first_two(ctx:&mut Ctx)->Result<()>{
- require_root("merge (mounts / overlay)")?;
- let snaps = ctx.list_snapshots()?;
- if snaps.len() < 2 { return Ok(()); }
- let s1 = &snaps[0];
- let s2 = &snaps[1];
- let idx1 = parse_snap_index(s1)?;
- let idx2 = parse_snap_index(s2)?;
- if idx2 != idx1+1 { bail!("Unexpected indices: {:04} + {:04}", idx1, idx2); }
-
- let m1 = ctx.mounts_dir.join(format!("merge_{:04}", idx1));
- let m2 = ctx.mounts_dir.join(format!("merge_{:04}", idx2));
- fs::create_dir_all(&m1)?;
- fs::create_dir_all(&m2)?;
- mount_image_ro(ctx, s1, &m1)?;
- mount_image_ro(ctx, s2, &m2)?;
-
- let upper = ctx.temp_path("merge.upper");
- let work = ctx.temp_path("merge.work");
- let view = ctx.temp_path("merge.view");
- fs::create_dir_all(&upper)?;
- fs::create_dir_all(&work)?;
- fs::create_dir_all(&view)?;
- let loweropt = format!("{}:{}", abspath(&m1).display(), abspath(&m2).display()); // s2 über s1
- mount_overlay(&loweropt, &upper, &work, &view)?;
+/* ---------- Whiteouts / Mount / Overlay ---------- */
- let tmp_plain = ctx.temp_path("merge.plain.sqsh");
- let mut cmd = Command::new("mksquashfs");
- cmd.arg(&view).arg(&tmp_plain).arg("-no-progress").arg("-no-recovery");
- if ctx.comp_enable {
- cmd.arg("-comp").arg(&ctx.comp_algo);
- if let Some(extra)=ctx.comp_args.as_ref(){ for tok in shell_split(extra){ cmd.arg(tok);} }
+fn create_whiteouts_unlink_list(ctx:&Ctx, upto:usize)->Result<Vec<String>>{
+ if upto == 0 { return Ok(vec![]); }
+ let present = load_manifest(ctx, upto).unwrap_or_default();
+ let mut union_past: BTreeSet<String> = BTreeSet::new();
+ for i in 1..upto {
+ let m = load_manifest(ctx, i).unwrap_or_default();
+ for s in m { union_past.insert(s); }
}
- run(&mut cmd, "mksquashfs (merge)")?;
-
- let _ = umount(&view);
- let _ = umount(&m2);
- let _ = umount(&m1);
-
- let is_luks = s1.extension().and_then(|e| e.to_str()).unwrap_or("") == "luks";
- let out_path = ctx.snapshot_path(idx1, is_luks);
- if s1.exists(){ fs::remove_file(s1)?; }
- if is_luks {
- encrypt_into_luks(ctx, &tmp_plain, &out_path)?;
- fs::remove_file(&tmp_plain).ok();
- } else {
- fs::rename(&tmp_plain, &out_path)?;
- }
- if s2.exists(){ fs::remove_file(s2)?; }
- let rest = ctx.list_snapshots()?;
- for p in rest {
- let n = parse_snap_index(&p)?;
- if n > idx2 {
- let is_luks = p.extension().and_then(|e| e.to_str()).unwrap_or("") == "luks";
- let newp = ctx.snapshot_path(n-1, is_luks);
- fs::rename(&p, &newp)?;
- }
- }
-
- let mut conn = db_open_init(ctx)?;
- let tx = conn.transaction()?;
-
- let present = present_state_up_to(&tx, 2)?;
- tx.execute("DELETE FROM events WHERE snapshot_id IN (1,2)", [])?;
- tx.execute("DELETE FROM snapshots WHERE id IN (1,2)", [])?;
- tx.execute("INSERT INTO snapshots(id,created_at) VALUES(1,datetime('now'))", [])?;
- for (top, rel, sz, mt, kind) in present {
- let root_id: i64 = tx.query_row("SELECT id FROM roots WHERE top_name=?", params![top], |r| r.get(0))?;
- let fid = ensure_file_row(&tx, root_id, &rel, kind.chars().next().unwrap_or('f'))?;
- tx.execute("INSERT OR REPLACE INTO events(snapshot_id,file_id,change,size,mtime,checksum) VALUES(?,?,?,?,?,NULL)",
- params![1i64, fid, "c", sz, mt])?;
- }
- tx.execute("UPDATE events SET snapshot_id = snapshot_id - 1 WHERE snapshot_id > 2", [])?;
- tx.execute("UPDATE snapshots SET id = id - 1 WHERE id > 2", [])?;
- tx.commit()?;
-
- println!("Merged snapshots {:04} + {:04} → {:04}.", idx1, idx2, idx1);
- Ok(())
+ let deletions: Vec<String> = union_past.difference(&present).cloned().collect();
+ Ok(deletions)
}
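+// e.g. a path present in manifest_0001 but missing from manifest_0003 is returned here and
+// later unlinked from the mounted overlay view when mounting up to snapshot 3 (illustrative).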
-fn create_whiteouts(upper:&Path, deletes:&[(String,String)]) -> Result<()> {
- for (top_ns, rel) in deletes {
- let p = Path::new(rel);
- let parent = upper.join(top_ns).join(p.parent().unwrap_or(Path::new("")));
- fs::create_dir_all(&parent)?;
- let base = p.file_name().ok_or_else(|| anyhow!("Empty filename: {}", rel))?
- .to_string_lossy().to_string();
- let wh = parent.join(format!(".wh.{}", base));
- if wh.exists() { continue; }
- let mut cmd = Command::new("mknod");
- cmd.arg(&wh).arg("c").arg("0").arg("0");
- run(&mut cmd, "mknod whiteout")?;
+fn apply_whiteouts_via_unlink(view_root:&Path, deletes:&[String]) -> Result<()> {
+ for rel in deletes {
+ let p = view_root.join(rel);
+ match fs::remove_file(&p) {
+ Ok(_) => {},
+ Err(e) if e.kind() == ErrorKind::IsADirectory => {
+ let _ = fs::remove_dir(&p);
+ }
+ Err(e) if e.kind() == ErrorKind::NotFound => { /* okay */ }
+ Err(e) => return Err(anyhow!("Unlink failed for {}: {}", p.display(), e)),
+ }
}
Ok(())
}
@@ -853,29 +889,64 @@ fn create_whiteouts(upper:&Path, deletes:&[(String,String)]) -> Result<()> {
fn mount_image_ro(ctx:&Ctx, img:&Path, mnt:&Path)->Result<()>{
require_root("Mount SquashFS")?;
let mnt_abs = abspath(mnt);
+
if img.extension().and_then(|e| e.to_str()) == Some("luks") {
+ // LUKS → /dev/mapper/<mapper>
let mapper = format!(
"squashr_mount_{}",
img.file_stem().and_then(|s| s.to_str()).unwrap_or("img")
);
let dev = format!("/dev/mapper/{}", mapper);
+ // close a possibly stale old mapping
let _ = Command::new("cryptsetup").arg("close").arg(&mapper).status();
let mut o = Command::new("cryptsetup");
if let Some(args)=ctx.luks_open_args.as_ref(){ for t in shell_split(args){ o.arg(t);} }
o.arg("open").arg(img).arg(&mapper);
- run_interactive(&mut o, "cryptsetup open (mount)")?;
+ cryptsetup_run(&mut o, ctx.luks_pass.as_deref(), "cryptsetup open (mount)")?;
+
+ // 1) try a kernel mount
+ match try_mount_kernel_squashfs(Path::new(&dev), &mnt_abs, false) {
+ Ok(true) => return Ok(()),
+ Ok(false) => {
+ eprintln!("[warn] Kernel SquashFS mount failed; trying FUSE (squashfuse).");
+ }
+ Err(e) => {
+ eprintln!("[warn] Kernel SquashFS mount error: {e}; trying FUSE (squashfuse).");
+ }
+ }
- let mut cmd = Command::new("mount");
- cmd.arg("-t").arg("squashfs").arg("-o").arg("ro").arg(&dev).arg(&mnt_abs);
- run(&mut cmd, "mount squashfs (luks)")?;
+ // 2) try the FUSE fallback
+ match try_mount_fuse_squashfs(Path::new(&dev), &mnt_abs) {
+ Ok(true) => return Ok(()),
+ Ok(false) => {
+ // close the mapping cleanly
+ let _ = Command::new("cryptsetup").arg("close").arg(&mapper).status();
+ bail!("FUSE mount (squashfuse) failed. Check the 'squashfs' kernel module or install 'squashfuse'.");
+ }
+ Err(e) => {
+ let _ = Command::new("cryptsetup").arg("close").arg(&mapper).status();
+ bail!("{e}. Check the 'squashfs' kernel module or install 'squashfuse'.");
+ }
+ }
} else {
- let mut cmd = Command::new("mount");
- cmd.arg("-t").arg("squashfs").arg("-o").arg("loop,ro").arg(img).arg(&mnt_abs);
- run(&mut cmd, "mount squashfs")?;
+ // plain SquashFS file
+ match try_mount_kernel_squashfs(img, &mnt_abs, true) {
+ Ok(true) => return Ok(()),
+ Ok(false) => {
+ eprintln!("[warn] Kernel SquashFS mount failed; trying FUSE (squashfuse).");
+ }
+ Err(e) => {
+ eprintln!("[warn] Kernel SquashFS mount error: {e}; trying FUSE (squashfuse).");
+ }
+ }
+ match try_mount_fuse_squashfs(img, &mnt_abs) {
+ Ok(true) => Ok(()),
+ Ok(false) => bail!("FUSE mount (squashfuse) failed. Check the 'squashfs' kernel module or install 'squashfuse'."),
+ Err(e) => bail!("{e}. Check the 'squashfs' kernel module or install 'squashfuse'."),
+ }
}
- Ok(())
}
fn mount_overlay(lowerdirs:&str, upper:&Path, work:&Path, target:&Path)->Result<()>{
@@ -894,47 +965,32 @@ fn mount_overlay(lowerdirs:&str, upper:&Path, work:&Path, target:&Path)->Result<
Ok(())
}
-fn apply_whiteouts_via_unlink(view_root:&Path, deletes:&[(String,String)]) -> Result<()> {
- for (top_ns, rel) in deletes {
- let p = view_root.join(top_ns).join(rel);
- match fs::remove_file(&p) {
- Ok(_) => {},
- Err(e) if e.kind() == ErrorKind::NotFound => {
- // okay
- },
- Err(e) => return Err(anyhow!("Unlink failed for {}: {}", p.display(), e)),
- }
- }
- Ok(())
-}
+/* ---------- Umount (robust, idempotent) ---------- */
fn umount(path:&Path)->Result<()>{
require_root("umount")?;
+ // if not mounted → OK (idempotent)
+ if !is_target_mounted(path) {
+ return Ok(());
+ }
+ // try a regular umount first
let mut cmd = Command::new("umount");
cmd.arg(path);
- run(&mut cmd, "umount")?;
- Ok(())
-}
-
-// --- NEU: robustes Umount, ignoriert "not mounted" (Exit-Code 32)
-fn umount_ignore_not_mounted(path:&Path)->Result<()>{
- require_root("umount")?;
- let status = Command::new("umount")
- .arg(path)
- .status()
- .with_context(|| format!("umount {}", path.display()))?;
- if !status.success() {
- if let Some(32) = status.code() {
- // EX_NOTMNT – war kein Mountpoint; passt.
- return Ok(());
- }
- bail!("umount failed for {}", path.display());
+ let status = cmd.status().with_context(|| "invoking umount")?;
+ if status.success() {
+ return Ok(());
}
- Ok(())
-}
+ // FUSE cases: fusermount3/fusermount
+ let _ = Command::new("fusermount3").arg("-u").arg(path).status();
+ let status2 = Command::new("fusermount").arg("-u").arg(path).status();
+ if let Ok(s) = status2 {
+ if s.success() { return Ok(()); }
+ }
+ // last resort: lazy unmount
+ let _ = Command::new("umount").arg("-l").arg(path).status();
+ if !is_target_mounted(path) { return Ok(()); }
-fn crypt_close_ignore(name:&str){
- let _ = Command::new("cryptsetup").arg("close").arg(name).status();
+ bail!("umount/fusermount failed for {}", path.display());
}
fn find_snapshot_file(ctx:&Ctx, idx:usize)->Result<PathBuf>{
@@ -982,9 +1038,8 @@ fn cmd_mount(ctx:&mut Ctx, s:Option<usize>, target:&Path)->Result<()>{
let loweropt = lowers.iter().rev().map(|p| p.display().to_string()).join(":"); // neuester zuerst
mount_overlay(&loweropt, &upper, &work, &abspath(target))?;
- // <<<<<< NEU: Whiteouts NACH dem Mount via unlink erzeugen
- let mut conn = db_open_init(ctx)?;
- let deletes = deletions_up_to(&conn, upto as i64)?;
+ // whiteouts/unlinks derived from the manifests up to 'upto'
+ let deletes = create_whiteouts_unlink_list(ctx, upto)?;
if !deletes.is_empty() {
apply_whiteouts_via_unlink(&abspath(target), &deletes)?;
}
@@ -994,62 +1049,7 @@ fn cmd_mount(ctx:&mut Ctx, s:Option<usize>, target:&Path)->Result<()>{
Ok(())
}
-fn cmd_umount(ctx:&mut Ctx, target:&Path)->Result<()>{
- require_root("umount")?;
-
- // 1) Overlay-Ziel aushängen (falls gemountet)
- match umount_ignore_not_mounted(&abspath(target)) {
- Ok(_) => println!("Umounted {} (overlay/target, falls vorhanden).", abspath(target).display()),
- Err(e) => println!("Hinweis: {} (fahre fort)", e),
- }
-
- // 2) Alle per-snapshot Mounts unter mounts_dir aushängen
- let mut entries: Vec<_> = fs::read_dir(&ctx.mounts_dir)?
- .filter_map(|e| e.ok())
- .map(|e| e.path())
- .filter(|p| p.is_dir())
- .collect();
- // in umgekehrter Reihenfolge, damit höhere Indizes zuerst fallen
- entries.sort();
- entries.reverse();
-
- for mnt in &entries {
- let _ = umount_ignore_not_mounted(mnt);
- }
-
- // 3) LUKS-Mapper schließen, wenn vorhanden
- for mnt in &entries {
- if let Some(name) = mnt.file_name().and_then(|s| s.to_str()) {
- // Verzeichnisnamen sind "snap_0001" / "merge_0001"
- if let Some(idx_str) = name.split('_').nth(1) {
- // Versuche beide möglichen Mapper-Namen (je nach file_stem):
- // "squashr_mount_0001" und "squashr_mount_0001.squashfs"
- let mapper_a = format!("squashr_mount_{}", idx_str);
- let mapper_b = format!("squashr_mount_{}.squashfs", idx_str);
- crypt_close_ignore(&mapper_a);
- crypt_close_ignore(&mapper_b);
- }
- }
- }
-
- // 4) Leere Mount-Verzeichnisse wegräumen (optional & best effort)
- for mnt in entries {
- let _ = fs::remove_dir(mnt);
- }
-
- // 5) Verwaiste temporäre Upper/Work-Verzeichnisse aus der letzten Mount-Session aufräumen
- if let Ok(ws) = fs::read_dir(&ctx.work_dir) {
- for e in ws.filter_map(|e| e.ok()) {
- if let Some(n) = e.file_name().to_str() {
- if n.starts_with("overlay.upper_") || n.starts_with("overlay.work_") {
- let _ = fs::remove_dir_all(e.path());
- }
- }
- }
- }
- println!("Umount abgeschlossen.");
- Ok(())
-}
+/* ---------- Delete / New ---------- */
fn cmd_delete(ctx:&mut Ctx, s:usize)->Result<()>{
let path_plain = ctx.snapshot_path(s, false);
@@ -1058,6 +1058,10 @@ fn cmd_delete(ctx:&mut Ctx, s:usize)->Result<()>{
if path_plain.exists(){ fs::remove_file(&path_plain)?; }
if path_luks.exists(){ fs::remove_file(&path_luks)?; }
+ // remove the manifest of the deleted snapshot
+ let _ = fs::remove_file(ctx.manifest_path(s));
+
+ // renumber all subsequent snapshots and manifests
let snaps = ctx.list_snapshots()?;
for p in snaps {
let n = parse_snap_index(&p)?;
@@ -1067,28 +1071,188 @@ fn cmd_delete(ctx:&mut Ctx, s:usize)->Result<()>{
fs::rename(&p, &newp)?;
}
}
+ // shift the manifests down
+ let mut k = s + 1;
+ loop {
+ let from = ctx.manifest_path(k);
+ if !from.exists() { break; }
+ let to = ctx.manifest_path(k - 1);
+ fs::rename(&from, &to)?;
+ k += 1;
+ }
- // DB anpassen
- let mut conn = db_open_init(ctx)?;
- let tx = conn.transaction()?;
- tx.execute("DELETE FROM events WHERE snapshot_id=?", params![s as i64])?;
- tx.execute("DELETE FROM snapshots WHERE id=?", params![s as i64])?;
- tx.execute("UPDATE events SET snapshot_id = snapshot_id - 1 WHERE snapshot_id > ?", params![s as i64])?;
- tx.execute("UPDATE snapshots SET id = id - 1 WHERE id > ?", params![s as i64])?;
- tx.commit()?;
println!("Deleted snapshot {}, decrementing children.", s);
Ok(())
}
fn cmd_new(ctx:&mut Ctx)->Result<()>{
for s in ctx.list_snapshots()? { fs::remove_file(s)?; }
- let mut conn = db_open_init(ctx)?;
- conn.execute("DELETE FROM events", [])?;
- conn.execute("DELETE FROM snapshots", [])?;
- println!("Database cleared. Creating new initial snapshot.");
+ // delete the manifests and tar.snapshot
+ for e in fs::read_dir(&ctx.state_dir)? {
+ let p = e?.path();
+ if let Some(name) = p.file_name().and_then(|s| s.to_str()) {
+ if name.starts_with("manifest_") || name == "tar.snapshot" {
+ let _ = fs::remove_file(&p);
+ }
+ }
+ }
+ println!("State cleared. Creating new initial snapshot.");
cmd_backup(ctx)
}
+/* ---------- Umount Command ---------- */
+
+fn cmd_umount(ctx:&mut Ctx, target: Option<&Path>) -> Result<()> {
+ require_root("umount")?;
+
+ let mounts_before = read_proc_mounts()?;
+ let mut todo: Vec<PathBuf> = Vec::new();
+ let mut mappers_in_use: HashSet<String> = HashSet::new();
+ let mut loops_in_use: HashSet<String> = HashSet::new();
+
+ if let Some(t) = target {
+ // only this target path (if mounted)
+ if is_target_mounted(t) {
+ todo.push(abspath(t));
+ }
+ // remember the source (for closing/detaching later)
+ if let Some(m) = mounts_before.iter().find(|m| abspath(&m.tgt) == abspath(t)) {
+ if m.src.starts_with("/dev/mapper/squashr_") {
+ mappers_in_use.insert(Path::new(&m.src).file_name().unwrap_or_default().to_string_lossy().to_string());
+ } else if m.src.starts_with("/dev/loop") {
+ loops_in_use.insert(m.src.clone());
+ }
+ }
+ } else {
+ // 1) all mounts under mounts_dir (snap_XXXX, merge_XXXX, etc.)
+ for m in &mounts_before {
+ if m.tgt.starts_with(&ctx.mounts_dir) {
+ todo.push(m.tgt.clone());
+ }
+ }
+ // 2) overlay target(s) created by SquashR: identified by an upperdir inside ctx.work_dir
+ for m in &mounts_before {
+ if m.fstype == "overlay" && m.opts.contains("upperdir=") {
+ if let Some(start) = m.opts.find("upperdir=") {
+ let rest = &m.opts[start + "upperdir=".len()..];
+ let upper = rest.split(',').next().unwrap_or("");
+ if upper.starts_with(ctx.work_dir.to_string_lossy().as_ref()) {
+ todo.push(m.tgt.clone());
+ }
+ }
+ }
+ }
+ // 3) single-snapshot mounts (LUKS): source /dev/mapper/squashr_*
+ for m in &mounts_before {
+ if m.fstype == "squashfs" && m.src.starts_with("/dev/mapper/squashr_") {
+ todo.push(m.tgt.clone());
+ let mapper = Path::new(&m.src).file_name().unwrap_or_default().to_string_lossy().to_string();
+ mappers_in_use.insert(mapper);
+ }
+ }
+ // 4) single-snapshot mounts (plain/loop): source /dev/loopX with a backing file inside state_dir
+ for m in &mounts_before {
+ if m.fstype == "squashfs" && m.src.starts_with("/dev/loop") {
+ if let Some(back) = loop_backing_file(&m.src) {
+ if back.starts_with(&ctx.state_dir) {
+ todo.push(m.tgt.clone());
+ loops_in_use.insert(m.src.clone());
+ }
+ }
+ }
+ }
+ // 5) FUSE mounts of SquashFS files from state_dir (or of a mapper device)
+ for m in &mounts_before {
+ if m.fstype.starts_with("fuse") {
+ let src_path = PathBuf::from(&m.src);
+ if src_path.starts_with(&ctx.state_dir) && src_path.extension().and_then(|e| e.to_str()) == Some("squashfs") {
+ todo.push(m.tgt.clone());
+ }
+ if m.src.starts_with("/dev/mapper/squashr_") {
+ todo.push(m.tgt.clone());
+ let mapper = Path::new(&m.src).file_name().unwrap_or_default().to_string_lossy().to_string();
+ mappers_in_use.insert(mapper);
+ }
+ }
+ }
+ }
+
+ // keep only targets that are currently mounted (robust against stale paths)
+ let mounted_now: HashSet<PathBuf> = read_proc_mounts()?
+ .into_iter()
+ .map(|m| abspath(&m.tgt))
+ .collect();
+
+ todo = todo.into_iter()
+ .map(|p| abspath(&p))
+ .filter(|p| mounted_now.contains(p))
+ .collect();
+
+ let mut did_something = !todo.is_empty();
+ let todo = sort_paths_deep_first(todo);
+
+ // unmount
+ let mut errors = Vec::new();
+ for mpt in &todo {
+ match umount(mpt) {
+ Ok(_) => vlog!(ctx, "[umount] {}", mpt.display()),
+ Err(e) => {
+ eprintln!("[warn] umount {}: {}", mpt.display(), e);
+ errors.push((mpt.clone(), e.to_string()));
+ }
+ }
+ }
+
+ // re-read the mounts after unmounting
+ let mounts_after = read_proc_mounts().unwrap_or_default();
+
+ // close LUKS mappers (only if no longer mounted)
+ for mapper in mappers_in_use {
+ let devpath = format!("/dev/mapper/{mapper}");
+ let still_mounted = mounts_after.iter().any(|m| m.src == devpath);
+ if !still_mounted {
+ let _ = Command::new("cryptsetup").arg("close").arg(&mapper).status();
+ vlog!(ctx, "[cryptsetup] close {}", mapper);
+ did_something = true;
+ } else {
+ eprintln!("[warn] Device {mapper} is still in use.");
+ errors.push((PathBuf::from(devpath), "still in use".into()));
+ }
+ }
+
+ // detach loop devices if their backing file lives in state_dir and they are no longer mounted
+ for loopdev in loops_in_use {
+ let still_mounted = mounts_after.iter().any(|m| m.src == loopdev);
+ if !still_mounted {
+ // only if the device still exists
+ if Path::new(&loopdev).exists() {
+ let _ = Command::new("losetup").arg("-d").arg(&loopdev).status();
+ vlog!(ctx, "[losetup] detach {}", loopdev);
+ did_something = true;
+ }
+ } else {
+ eprintln!("[warn] Loop device {} still in use.", loopdev);
+ errors.push((PathBuf::from(loopdev), "still in use".into()));
+ }
+ }
+
+ if !did_something && target.is_none() {
+ println!("No SquashR mounts found.");
+ }
+
+ if errors.is_empty() {
+ println!("All requested mounts unmounted and SquashR devices closed.");
+ Ok(())
+ } else {
+ bail!(
+ "Some mounts/devices could not be released: {:?}",
+ errors.iter().map(|(p,_)| p.display().to_string()).collect::<Vec<_>>()
+ )
+ }
+}
+
+/* ---------- Minimize ---------- */
+
fn cmd_minimize(ctx:&mut Ctx, n_opt:Option<usize>)->Result<()>{
let target = n_opt.unwrap_or(ctx.min_keep);
if target < ctx.min_keep {
@@ -1099,6 +1263,96 @@ fn cmd_minimize(ctx:&mut Ctx, n_opt:Option<usize>)->Result<()>{
if snaps.len() <= target { break; }
merge_first_two(ctx)?;
}
- println!("Database minimized. New snapshot count: {}", ctx.list_snapshots()?.len());
+ println!("Minimized. New snapshot count: {}", ctx.list_snapshots()?.len());
Ok(())
}
+
+/* ---------- Merge ---------- */
+
+fn merge_first_two(ctx:&mut Ctx)->Result<()>{
+ require_root("merge (mounts / overlay)")?;
+ let snaps = ctx.list_snapshots()?;
+ if snaps.len() < 2 { return Ok(()); }
+ let s1 = &snaps[0];
+ let s2 = &snaps[1];
+ let idx1 = parse_snap_index(s1)?;
+ let idx2 = parse_snap_index(s2)?;
+ if idx2 != idx1+1 { bail!("Unexpected indices: {:04} + {:04}", idx1, idx2); }
+
+ let m1 = ctx.mounts_dir.join(format!("merge_{:04}", idx1));
+ let m2 = ctx.mounts_dir.join(format!("merge_{:04}", idx2));
+ fs::create_dir_all(&m1)?;
+ fs::create_dir_all(&m2)?;
+ mount_image_ro(ctx, s1, &m1)?;
+ mount_image_ro(ctx, s2, &m2)?;
+
+ let upper = ctx.temp_path("merge.upper");
+ let work = ctx.temp_path("merge.work");
+ let view = ctx.temp_path("merge.view");
+ fs::create_dir_all(&upper)?;
+ fs::create_dir_all(&work)?;
+ fs::create_dir_all(&view)?;
+ let loweropt = format!("{}:{}", abspath(&m1).display(), abspath(&m2).display()); // s2 over s1
+ mount_overlay(&loweropt, &upper, &work, &view)?;
+
+ // unlinks according to manifest_2 (target state) versus manifest_1
+ let present = load_manifest(ctx, idx2).unwrap_or_default();
+ let past = load_manifest(ctx, idx1).unwrap_or_default();
+ let deletes: Vec<String> = past.difference(&present).cloned().collect();
+ if !deletes.is_empty() {
+ apply_whiteouts_via_unlink(&abspath(&view), &deletes)?;
+ }
+
+ // new SquashFS from the merged view
+ let tmp_plain = ctx.temp_path("merge.plain.sqsh");
+ let mut cmd = Command::new("mksquashfs");
+ cmd.arg(&view).arg(&tmp_plain).arg("-no-progress").arg("-no-recovery");
+ if ctx.comp_enable {
+ cmd.arg("-comp").arg(&ctx.comp_algo);
+ if let Some(extra)=ctx.comp_args.as_ref(){ for tok in shell_split(extra){ cmd.arg(tok);} }
+ }
+ run(&mut cmd, "mksquashfs (merge)")?;
+
+ let _ = umount(&view);
+ let _ = umount(&m2);
+ let _ = umount(&m1);
+
+ let is_luks = s1.extension().and_then(|e| e.to_str()).unwrap_or("") == "luks";
+ let out_path = ctx.snapshot_path(idx1, is_luks);
+ if s1.exists(){ fs::remove_file(s1)?; }
+ if is_luks {
+ encrypt_into_luks(ctx, &tmp_plain, &out_path)?;
+ fs::remove_file(&tmp_plain).ok();
+ } else {
+ fs::rename(&tmp_plain, &out_path)?;
+ }
+ if s2.exists(){ fs::remove_file(s2)?; }
+
+ // decrement indices > idx2 (files + manifests)
+ let rest = ctx.list_snapshots()?;
+ for p in rest {
+ let n = parse_snap_index(&p)?;
+ if n > idx2 {
+ let is_luks = p.extension().and_then(|e| e.to_str()).unwrap_or("") == "luks";
+ let newp = ctx.snapshot_path(n-1, is_luks);
+ fs::rename(&p, &newp)?;
+ }
+ }
+ // manifests: manifest_1 := manifest_2, then decrement all > 2
+ let m1p = ctx.manifest_path(1);
+ let m2p = ctx.manifest_path(2);
+ if m1p.exists(){ fs::remove_file(&m1p).ok(); }
+ if m2p.exists(){ fs::rename(&m2p, &m1p).ok(); }
+ let mut k = 3usize;
+ loop {
+ let from = ctx.manifest_path(k);
+ if !from.exists() { break; }
+ let to = ctx.manifest_path(k-1);
+ fs::rename(&from, &to)?;
+ k += 1;
+ }
+
+ println!("Merged snapshots {:04} + {:04} → {:04}.", idx1, idx2, idx1);
+ Ok(())
+}
+