diff options
| author | Leonard Kugis <leonard@kug.is> | 2025-10-17 02:48:31 +0200 |
|---|---|---|
| committer | Leonard Kugis <leonard@kug.is> | 2025-10-17 02:48:31 +0200 |
| commit | 138047dbc25c150a6b4efafa9cec81de425bd5c7 (patch) | |
| tree | 9c3769ff643306d7571f236abba77b6ea33b28a2 | |
| parent | e67e3a50889aaa8eefbb334ef408057a2411963f (diff) | |
| download | squashr-138047dbc25c150a6b4efafa9cec81de425bd5c7.tar.gz | |
Fixed timestamp folders and posix/gnu format
| -rwxr-xr-x | src/main.rs | 356 |
1 file changed, 274 insertions(+), 82 deletions(-)
diff --git a/src/main.rs b/src/main.rs index 59008e3..c795089 100755 --- a/src/main.rs +++ b/src/main.rs @@ -23,7 +23,7 @@ use serde::Deserialize; use std::collections::{BTreeSet, HashMap, HashSet}; use std::env; use std::fs; -use std::io::{Read, Write}; +use std::io::{self, Read, Write}; use std::io::ErrorKind; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; @@ -254,6 +254,8 @@ struct Ctx { luks_open_args: Option<String>, luks_pass: Option<Vec<u8>>, verbose: bool, + debug_capture: bool, + debug_cap_bytes: u64, } impl Ctx { @@ -265,7 +267,7 @@ impl Ctx { fs::create_dir_all(&state_dir)?; fs::create_dir_all(&mounts_dir)?; fs::create_dir_all(&work_dir)?; - let tar_snapshot = state_dir.join("tar.snapshot"); + let tar_snapshot = work_dir.join("tar.snapshot"); let max_raw = cfg.SQUASHR_N_SNAPSHOTS_MAX.unwrap_or(0); let max_keep = if max_raw == 0 { None } else { Some(max_raw) }; @@ -279,6 +281,17 @@ impl Ctx { None }; + let debug_capture = env::var("SQUASHR_DEBUG_TAR_CAPTURE") + .ok() + .map(|v| v == "1" || v.eq_ignore_ascii_case("true")) + .unwrap_or(false); + + let debug_cap_bytes = env::var("SQUASHR_DEBUG_TAR_MAX_MB") + .ok() + .and_then(|s| s.parse::<u64>().ok()) + .map(|mb| mb.saturating_mul(1024 * 1024)) + .unwrap_or(0); + Ok(Self { root, state_dir, @@ -298,6 +311,8 @@ impl Ctx { luks_open_args: cfg.SQUASHR_CRYPTSETUP_OPEN_ARGS, luks_pass, verbose, + debug_capture, + debug_cap_bytes, }) } @@ -508,42 +523,147 @@ fn parse_tar_size_octal(field: &[u8]) -> u64 { s } +fn parse_tar_size(field: &[u8]) -> u64 { + if field.is_empty() { return 0; } + // GNU base-256? 
+ if (field[0] & 0x80) != 0 { + let mut v: u128 = 0; + let mut first = true; + for &b in field { + let bb = if first { b & 0x7F } else { b }; + first = false; + v = (v << 8) | (bb as u128); + } + return v as u64; + } + // Standard: oktal bis Space/NUL + let mut s = 0u64; + for &c in field { + match c { + b'0'..=b'7' => s = (s << 3) + (c - b'0') as u64, + b' ' | 0 => break, + _ => {} + } + } + s +} + +fn tar_checksum(header: &[u8; 512]) -> u32 { + let mut sum: u32 = 0; + for i in 0..512 { + if (148..156).contains(&i) { + sum += b' ' as u32; + } else { + sum += header[i] as u32; + } + } + sum +} + +fn parse_hdr_chksum_octal(field: &[u8]) -> Option<u32> { + let mut s: u32 = 0; + let mut any = false; + for &c in field { + match c { + b'0'..=b'7' => { s = (s << 3) + (c - b'0') as u32; any = true; } + b' ' | 0 => break, + _ => {} + } + } + if any { Some(s) } else { None } +} + +fn is_valid_tar_header(h: &[u8; 512]) -> bool { + if is_zero_block(h) { return true; } + match parse_hdr_chksum_octal(&h[148..156]) { + Some(want) => tar_checksum(h) == want, + None => false, + } +} + +fn copy_blocks_resync<R: Read, W: Write>( + r: &mut R, + mut out: Option<&mut W>, // <- mut + mut bytes: u64, +) -> Result<[u8; 512]> { + let mut buf = vec![0u8; 128 * 1024]; + while bytes > 0 { + let take = (bytes as usize).min(buf.len()); + r.read_exact(&mut buf[..take])?; + if let Some(w) = out.as_mut() { // <- as_mut statt as_ref + w.write_all(&buf[..take])?; + } + bytes -= take as u64; + } + + loop { + let mut next = [0u8; 512]; + r.read_exact(&mut next)?; + if is_valid_tar_header(&next) { + return Ok(next); + } + if let Some(w) = out.as_mut() { // <- as_mut statt as_ref + w.write_all(&next)?; + } + } +} + fn forward_tar_strip_gnu_dumpdirs<R: Read, W: Write>(mut r: R, mut w: W) -> Result<()> { let mut header = [0u8; 512]; - let mut buf = vec![0u8; 1024 * 1024]; + let mut pending: Vec<Vec<u8>> = Vec::new(); // komplette Records L/K/x/g puffern - loop { - r.read_exact(&mut header).with_context(|| 
"read tar header")?; + // erstes Header-Block lesen + r.read_exact(&mut header).with_context(|| "read tar header")?; + loop { + // Trailer? if is_zero_block(&header) { + // zweiter Zero-Block gehört dazu + let mut zero2 = [0u8; 512]; + r.read_exact(&mut zero2).with_context(|| "read tar trailing zero")?; + // evtl. pending verwerfen (sollte leer sein) + pending.clear(); w.write_all(&header)?; - r.read_exact(&mut header).with_context(|| "read tar trailing zero")?; - w.write_all(&header)?; + w.write_all(&zero2)?; w.flush()?; break; } + if !is_valid_tar_header(&header) { + bail!("TAR desync: invalid header checksum encountered"); + } + let typeflag = header[156] as char; - let size = parse_tar_size_octal(&header[124..124+12]); - let padded = ((size + 511) / 512) * 512; - - if typeflag == 'D' { - let mut to_skip = padded; - while to_skip > 0 { - let take = to_skip.min(buf.len() as u64) as usize; - r.read_exact(&mut buf[..take])?; - to_skip -= take as u64; + let size = parse_tar_size(&header[124..124+12]); + let padded = (size + 511) & !511; + + match typeflag { + // L/K/x/g: vollständig puffern (Header + Payload (+ evtl. Spill)) + 'L' | 'K' | 'x' | 'g' => { + let mut rec: Vec<u8> = Vec::with_capacity(512); + rec.extend_from_slice(&header); + // Payload plus evtl. Spill lesen + header = copy_blocks_resync(&mut r, Some(&mut rec), padded)?; + pending.push(rec); + continue; // weiter mit neuem 'header' } - continue; - } - w.write_all(&header)?; - let mut remaining = padded as i64; - while remaining > 0 { - let take = remaining.min(buf.len() as i64) as usize; - r.read_exact(&mut buf[..take])?; - w.write_all(&buf[..take])?; - remaining -= take as i64; + // GNU dumpdir → komplett wegwerfen, inkl. evtl. vorausgehender L/K/x/g + 'D' => { + header = copy_blocks_resync(&mut r, None::<&mut W>, padded)?; + pending.clear(); + continue; + } + + // alle „normalen“ Einträge: erst pending ausgeben, dann diesen Record + _ => { + for rec in pending.drain(..) 
{ w.write_all(&rec)?; } + // aktuellen Header schreiben + w.write_all(&header)?; + // Payload (und evtl. Spill) streamen + header = copy_blocks_resync(&mut r, Some(&mut w), padded)?; + continue; + } } } @@ -625,7 +745,7 @@ fn extract_file_from_meta(ctx:&Ctx, filename:&str, dest_path:&Path) -> Result<bo fn cleanup_plain_meta_files(ctx:&Ctx) { // tar.snapshot + alle manifest_* im state_dir entfernen, falls vorhanden let _ = fs::remove_file(&ctx.tar_snapshot); - if let Ok(rd) = fs::read_dir(&ctx.state_dir) { + if let Ok(rd) = fs::read_dir(&ctx.work_dir) { for e in rd.flatten() { if let Some(name) = e.file_name().to_str() { if name.starts_with("manifest_") && name.ends_with(".txt") { @@ -704,44 +824,37 @@ fn load_manifest(ctx:&Ctx, idx: usize) -> Result<BTreeSet<String>> { /* ---------- TAR → SQFSTAR ---------- */ -/// Erzeugt inkrementellen *tar* und streamt ihn (mit Filter) in `sqfstar`. -/// Rückgabewert: true, falls tar mit Non-Zero beendet wurde (Warnung). fn build_squash_image_tar_sqfstar( ctx: &Ctx, out: &Path, - roots: &HashMap<String, PathBuf>, -) -> Result<bool /* tar_nonzero */> { + roots: &std::collections::HashMap<String, std::path::PathBuf>, +) -> anyhow::Result<bool> { + use std::io::{self, Read, Write}; + if out.exists() { - fs::remove_file(out).with_context(|| format!("remove old output: {}", out.display()))?; + std::fs::remove_file(out) + .with_context(|| format!("remove old output: {}", out.display()))?; } - // sqfstar: Tar von stdin → SquashFS out - let mut sq = Command::new("sqfstar"); + // --- sqfstar: liest Tar von stdin, schreibt SquashFS nach `out` + let mut sq = std::process::Command::new("sqfstar"); if ctx.comp_enable { sq.arg("-comp").arg(&ctx.comp_algo); } sq.arg(out).arg("-"); - sq.stdin(Stdio::piped()); - sq.stdout(Stdio::inherit()); - sq.stderr(Stdio::inherit()); - - vlog!(ctx, "[backup] sqfstar ← tar (incremental stream, dumpdir-filtered) → {}", out.display()); + sq.stdin(std::process::Stdio::piped()); + 
sq.stdout(std::process::Stdio::inherit()); + sq.stderr(std::process::Stdio::inherit()); let mut sq_child = sq.spawn().with_context(|| "Unable to start sqfstar")?; - let sq_stdin = sq_child.stdin.take().ok_or_else(|| anyhow!("cannot open sqfstar stdin"))?; + let mut sq_stdin = sq_child.stdin + .take() + .ok_or_else(|| anyhow::anyhow!("cannot open sqfstar stdin"))?; - // tar: POSIX/PAX + GNU listed-incremental; Pfade relativ zu / - let mut tar = Command::new("tar"); - tar.arg("--format=posix") - .arg("-C").arg("/") - .arg("--null") - .arg("--files-from=-") - .arg("--listed-incremental").arg(&ctx.tar_snapshot) - .arg("-cf").arg("-") - .arg("--numeric-owner") - .arg("--ignore-failed-read"); + // --- tar: POSIX/PAX + listed-incremental, KEINE Filterei + let mut tar = std::process::Command::new("tar"); - // Excludes (relativ zu /) - let mut all_excludes: Vec<PathBuf> = ctx.exclude.clone(); + // Excludes relativ zu / + let mut all_excludes: Vec<std::path::PathBuf> = ctx.exclude.clone(); for auto in [&ctx.state_dir, &ctx.mounts_dir, &ctx.work_dir] { all_excludes.push(auto.clone()); } @@ -752,32 +865,48 @@ fn build_squash_image_tar_sqfstar( } } - tar.stdin(Stdio::piped()); - tar.stdout(Stdio::piped()); - tar.stderr(Stdio::inherit()); + tar.arg("--format=posix") + // Entfernt das GNU.dumpdir-Attribut aus PAX-XHeaders → keine Warnungen mehr + //.arg("--pax-option").arg("delete=GNU.dumpdir,exthdr.name=%d/PaxHeaders/%f") + .arg("-C").arg("/") + .arg("--null") + .arg("--files-from=-") + .arg("--listed-incremental").arg(&ctx.tar_snapshot) + .arg("-cf").arg("-") + .arg("--numeric-owner") + .arg("--ignore-failed-read"); + + tar.stdin(std::process::Stdio::piped()); + tar.stdout(std::process::Stdio::piped()); + tar.stderr(std::process::Stdio::inherit()); let mut tar_child = tar.spawn().with_context(|| "Unable to start tar")?; - // Pfadliste: Roots *relativ* zu / (z.B. 
"usr", "var", "home/lk") + // Pfadliste (roots) in tar stdin schreiben, 0-terminiert { let mut w = std::io::BufWriter::new( - tar_child.stdin.take().ok_or_else(|| anyhow!("cannot open tar stdin"))? + tar_child.stdin.take().ok_or_else(|| anyhow::anyhow!("cannot open tar stdin"))? ); for (_top, abs_root) in roots.iter() { let rel = abs_root.to_string_lossy().trim_start_matches('/').to_string(); let item = if rel.is_empty() { ".".to_string() } else { rel }; w.write_all(item.as_bytes())?; - w.write_all(&[0])?; // NUL + w.write_all(&[0])?; } w.flush()?; - drop(w); } - // tar stdout → (Filter: strip dumpdir) → sqfstar stdin + // tar stdout *direkt* zu sqfstar stdin pumpen (ohne Filter) { - let tar_stdout = tar_child.stdout.take().ok_or_else(|| anyhow!("cannot open tar stdout"))?; - forward_tar_strip_gnu_dumpdirs(tar_stdout, sq_stdin)?; + let mut tar_stdout = tar_child.stdout + .take() + .ok_or_else(|| anyhow::anyhow!("cannot open tar stdout"))?; + // robust kopieren bis EOF + io::copy(&mut tar_stdout, &mut sq_stdin) + .with_context(|| "stream tar → sqfstar")?; } + drop(sq_stdin); + // Exit-Codes prüfen let tar_status = tar_child.wait().with_context(|| "waiting for tar failed")?; let tar_code = tar_status.code().unwrap_or(-1); let tar_nonzero = tar_code != 0; @@ -787,31 +916,12 @@ fn build_squash_image_tar_sqfstar( let sq_status = sq_child.wait().with_context(|| "waiting for sqfstar failed")?; if !sq_status.success() { - bail!("sqfstar failed with status {}", sq_status); + anyhow::bail!("sqfstar failed with status {}", sq_status); } Ok(tar_nonzero) } -fn truncate_logs(ctx:&Ctx)->Result<()>{ - if ctx.truncate.is_empty(){ return Ok(()); } - for base in &ctx.truncate { - if !base.exists(){ continue; } - for entry in WalkDir::new(base).follow_links(false).into_iter().filter_map(|e| e.ok()) { - let p = entry.path(); - if p.is_file() { - let name = p.file_name().and_then(|s| s.to_str()).unwrap_or(""); - if name.ends_with(".gz") || name.ends_with(".xz") || name.ends_with(".zst") 
|| name.ends_with(".bz2") { - let _ = fs::remove_file(p); - continue; - } - let _ = fs::OpenOptions::new().write(true).open(p).and_then(|f| { f.set_len(0)?; Ok(()) }); - } - } - } - Ok(()) -} - fn encrypt_into_luks(ctx:&Ctx, plain:&Path, out_luks:&Path)->Result<()>{ require_root("Creating LUKS container")?; @@ -829,6 +939,7 @@ fn encrypt_into_luks(ctx:&Ctx, plain:&Path, out_luks:&Path)->Result<()>{ cryptsetup_run(&mut c, ctx.luks_pass.as_deref(), "cryptsetup luksFormat")?; let mapper = format!("squashr_{}", parse_snap_index(out_luks).unwrap_or(0)); + let _ = Command::new("cryptsetup").arg("close").arg(&mapper).status(); vlog!(ctx, "[luks] open {} as {}", out_luks.display(), mapper); let mut o = Command::new("cryptsetup"); if let Some(args)=ctx.luks_open_args.as_ref(){ for t in shell_split(args){ o.arg(t);} } @@ -840,7 +951,7 @@ fn encrypt_into_luks(ctx:&Ctx, plain:&Path, out_luks:&Path)->Result<()>{ let mut dd = Command::new("dd"); dd.arg(format!("if={}", plain.display())) .arg(format!("of={}", dev)) - .arg("bs=4M").arg("status=none").arg("conv=fsync"); + .arg("bs=4M").arg("status=progress").arg("conv=fsync"); run(&mut dd, "dd")?; vlog!(ctx, "[luks] close {}", mapper); @@ -928,6 +1039,49 @@ fn loop_backing_file(dev: &str) -> Option<PathBuf> { None } +fn list_squashr_mappers() -> Vec<String> { + let mut out = Vec::new(); + if let Ok(rd) = fs::read_dir("/dev/mapper") { + for e in rd.flatten() { + if let Some(name) = e.file_name().to_str() { + if name.starts_with("squashr_") { out.push(name.to_string()); } + } + } + } + out +} + +fn list_loops_on_state(ctx: &Ctx) -> Vec<String> { + let mut out = Vec::new(); + if let Ok(rd) = fs::read_dir("/sys/block") { + for e in rd.flatten() { + let name = e.file_name().to_string_lossy().to_string(); + if !name.starts_with("loop") { continue; } + let bf1 = format!("/sys/block/{}/loop/backing_file", name); + let bf2 = format!("/sys/devices/virtual/block/{}/loop/backing_file", name); + for bf in [bf1, bf2] { + if let Ok(s) = 
fs::read_to_string(&bf) { + let p = PathBuf::from(s.trim()); + if !p.as_os_str().is_empty() && p.starts_with(&ctx.state_dir) { + out.push(format!("/dev/{}", name)); + break; + } + } + } + } + } + out +} + +fn purge_dir(dir: &Path) { + if let Ok(rd) = fs::read_dir(dir) { + for e in rd.flatten() { + let p = e.path(); + let _ = if p.is_dir() { fs::remove_dir_all(&p) } else { fs::remove_file(&p) }; + } + } +} + /* ---------- Backup ---------- */ fn ensure_tar_snapshot_materialized(ctx:&Ctx) -> Result<()> { @@ -936,6 +1090,30 @@ fn ensure_tar_snapshot_materialized(ctx:&Ctx) -> Result<()> { Ok(()) } +fn truncate_logs(ctx:&Ctx)->Result<()>{ + if ctx.truncate.is_empty(){ return Ok(()); } + for base in &ctx.truncate { + if !base.exists(){ continue; } + for entry in WalkDir::new(base).follow_links(false).into_iter().filter_map(|e| e.ok()) { + let p = entry.path(); + if p.is_file() { + let name = p.file_name().and_then(|s| s.to_str()).unwrap_or(""); + // komprimierte Logs löschen + if name.ends_with(".gz") || name.ends_with(".xz") || name.ends_with(".zst") || name.ends_with(".bz2") { + let _ = fs::remove_file(p); + continue; + } + // unkomprimierte Logs auf Länge 0 setzen + let _ = fs::OpenOptions::new() + .write(true) + .open(p) + .and_then(|f| { f.set_len(0)?; Ok(()) }); + } + } + } + Ok(()) +} + fn cmd_backup(ctx:&mut Ctx)->Result<()>{ ensure_includes_nonempty(ctx)?; truncate_logs(ctx)?; @@ -1340,6 +1518,13 @@ fn cmd_umount(ctx:&mut Ctx, target: Option<&Path>) -> Result<()> { } } + if target.is_none() { + // 6) Alle /dev/mapper/squashr_* aufnehmen (auch wenn nirgendwo gemountet) + for m in list_squashr_mappers() { mappers_in_use.insert(m); } + // 7) Alle Loop-Devices, deren backing_file im state_dir liegt (auch ohne Mount) + for l in list_loops_on_state(ctx) { loops_in_use.insert(l); } + } + // Nur aktuell gemountete Ziele behalten let mounted_now: HashSet<PathBuf> = read_proc_mounts()? 
.into_iter() @@ -1398,6 +1583,13 @@ fn cmd_umount(ctx:&mut Ctx, target: Option<&Path>) -> Result<()> { } } + // Workdir radikal leeren (temporäre Klartext-/Overlay-Reste) + if target.is_none() { + purge_dir(&ctx.work_dir); + // leere Mount-Unterverzeichnisse entfernen (best effort) + let _ = purge_dir(&ctx.mounts_dir); + } + if !did_something && target.is_none() { println!("No SquashR mounts found."); } |
