aboutsummaryrefslogtreecommitdiffstats
path: root/modules/general.py
diff options
context:
space:
mode:
authorLeonard Kugis <leonard@kug.is>2025-12-23 00:08:47 +0100
committerLeonard Kugis <leonard@kug.is>2025-12-23 00:08:47 +0100
commitec7598f568ff59ecc1eb51572f84d866b0180501 (patch)
tree775944e30a140cc20857a316397d9538e9d1eff6 /modules/general.py
parent78f4448a21614ed01b7c4e60eb496889bc58076d (diff)
downloadxembu-ec7598f568ff59ecc1eb51572f84d866b0180501.tar.gz
Removed unnecessary overhead
Diffstat (limited to 'modules/general.py')
-rw-r--r--modules/general.py214
1 files changed, 77 insertions, 137 deletions
diff --git a/modules/general.py b/modules/general.py
index f7eca38..3ad587a 100644
--- a/modules/general.py
+++ b/modules/general.py
@@ -12,57 +12,42 @@ from typing import Optional
from .base import Frame, BigFrame, ModuleResult
-
MONEY_UNITS = {"€", "eur", "EUR", "euro", "EURO"}
-
def _is_money_unit(u: str) -> bool:
return str(u).strip() in MONEY_UNITS
-
def compute_group_distribution(df: pd.DataFrame):
- """
- Liefert:
- group_summary: dict group -> info
- per_person: DataFrame columns [person, contributed, share, balance]
- per_group_person: DataFrame detail columns [group, person, contributed, usage, share, balance]
- """
- # Explode Gruppen
work = df.copy()
work = work.explode("dist_groups")
work["group"] = work["dist_groups"].fillna("").astype(str).str.strip()
work = work[work["group"] != ""]
- # C/U Normalisierung
- work["flag"] = work["Distributionsflag"].astype(str).str.strip().str.upper()
- work["person"] = work["Nutzer"].astype(str).str.strip()
+ work["flag"] = work["group_flag"].astype(str).str.strip().str.upper()
+ work["debitor"] = work["debitor"].astype(str).str.strip()
- # Contributions (Geld)
contrib = work[work["flag"] == "C"].copy()
if len(contrib) > 0:
bad_units = contrib[~contrib["unit"].apply(_is_money_unit)]
if len(bad_units) > 0:
raise ValueError(
"Contribution (C) muss Geld-Einheit haben (z.B. € / EUR). "
- f"Problemzeilen:\n{bad_units[['Datum','Nutzer','group','Positionsbezeichnung','Positionswert','unit']]}"
+ f"Problemzeilen:\n{bad_units[['date','debitor','group','position','val','unit']]}"
)
- # Usage (Beliebige Einheit, pro Gruppe sollte es sinnvoll einheitlich sein)
usage = work[work["flag"] == "U"].copy()
- # Summen
- contrib_by_gp = contrib.groupby(["group", "person"])["value"].sum().rename("contributed").reset_index()
- contrib_tot = contrib.groupby("group")["value"].sum().rename("total_contrib").reset_index()
+ contrib_by_gp = contrib.groupby(["group", "debitor"])["val"].sum().rename("contributed").reset_index()
+ contrib_tot = contrib.groupby("group")["val"].sum().rename("total_contrib").reset_index()
- usage_by_gp = usage.groupby(["group", "person"])["value"].sum().rename("usage").reset_index()
- usage_tot = usage.groupby("group")["value"].sum().rename("total_usage").reset_index()
+ usage_by_gp = usage.groupby(["group", "debitor"])["val"].sum().rename("usage").reset_index()
+ usage_tot = usage.groupby("group")["val"].sum().rename("total_usage").reset_index()
usage_unit = usage.groupby("group")["unit"].agg(lambda s: s.dropna().astype(str).unique().tolist()).reset_index()
usage_unit = usage_unit.rename(columns={"unit": "usage_units"})
- participants = work.groupby("group")["person"].agg(lambda s: sorted(set(s.tolist()))).reset_index()
- participants = participants.rename(columns={"person": "participants"})
+ participants = work.groupby("group")["debitor"].agg(lambda s: sorted(set(s.tolist()))).reset_index()
+ participants = participants.rename(columns={"debitor": "participants"})
- # group_summary
summary = (
participants.merge(contrib_tot, on="group", how="left")
.merge(usage_tot, on="group", how="left")
@@ -73,22 +58,19 @@ def compute_group_distribution(df: pd.DataFrame):
summary["has_usage"] = summary["total_usage"].apply(lambda x: x > 0)
summary["mode"] = summary.apply(lambda r: "usage" if r["has_usage"] else "equal", axis=1)
- # Detail pro (group, person)
detail = (
pd.DataFrame({"group": work["group"].unique()})
.assign(key=1)
- .merge(pd.DataFrame({"person": work["person"].unique()}).assign(key=1), on="key")
+ .merge(pd.DataFrame({"debitor": work["debitor"].unique()}).assign(key=1), on="key")
.drop(columns=["key"])
)
- # Nur relevante Paare, die in der Gruppe vorkommen
- gp_person = work[["group", "person"]].drop_duplicates()
- detail = detail.merge(gp_person, on=["group", "person"], how="inner")
+ gp_debitor = work[["group", "debitor"]].drop_duplicates()
+ detail = detail.merge(gp_debitor, on=["group", "debitor"], how="inner")
- detail = detail.merge(contrib_by_gp, on=["group", "person"], how="left").merge(usage_by_gp, on=["group", "person"], how="left")
+ detail = detail.merge(contrib_by_gp, on=["group", "debitor"], how="left").merge(usage_by_gp, on=["group", "debitor"], how="left")
detail["contributed"] = detail["contributed"].fillna(0.0)
detail["usage"] = detail["usage"].fillna(0.0)
- # Shares berechnen pro Gruppe
shares = []
for _, row in summary.iterrows():
g = row["group"]
@@ -97,7 +79,6 @@ def compute_group_distribution(df: pd.DataFrame):
n = len(parts) if parts else 0
g_detail = detail[detail["group"] == g].copy()
- # usage-mode, sobald es irgendeine U-Position gibt (auch wenn total_usage==0 → fallback)
g_has_any_u = (usage["group"] == g).any()
if g_has_any_u:
@@ -114,25 +95,22 @@ def compute_group_distribution(df: pd.DataFrame):
mode = "equal"
g_detail["mode"] = mode
- shares.append(g_detail[["group", "person", "share", "mode"]])
+ shares.append(g_detail[["group", "debitor", "share", "mode"]])
- shares_df = pd.concat(shares, ignore_index=True) if shares else pd.DataFrame(columns=["group","person","share","mode"])
- detail = detail.merge(shares_df, on=["group", "person"], how="left")
+ shares_df = pd.concat(shares, ignore_index=True) if shares else pd.DataFrame(columns=["group","debitor","share","mode"])
+ detail = detail.merge(shares_df, on=["group", "debitor"], how="left")
detail["share"] = detail["share"].fillna(0.0)
detail["balance"] = detail["contributed"] - detail["share"]
- # per_person totals
- per_person = detail.groupby("person")[["contributed", "share", "balance"]].sum().reset_index()
- per_person = per_person.sort_values("person")
+ per_debitor = detail.groupby("debitor")[["contributed", "share", "balance"]].sum().reset_index()
+ per_debitor = per_debitor.sort_values("debitor")
- # summary erweitern
- # "Sobald es eine Position mit U gibt" zählt, auch wenn total_usage==0 (fallback)
has_any_u = usage.groupby("group").size().rename("u_count").reset_index()
summary = summary.merge(has_any_u, on="group", how="left")
summary["u_count"] = summary["u_count"].fillna(0).astype(int)
summary["mode"] = summary["u_count"].apply(lambda c: "usage" if c > 0 else "equal")
- return summary, per_person, detail
+ return summary, per_debitor, detail
@dataclass
class GroupTimeSeries:
@@ -142,14 +120,13 @@ class GroupTimeSeries:
usage_units: List[str]
xlim_start: pd.Timestamp
xlim_end: pd.Timestamp
- contrib_cum: Dict[str, pd.Series] # € kumulativ
- usage_cum: Dict[str, pd.Series] # unit kumulativ (z.B. km, stk)
- share_cum: Dict[str, pd.Series] # € kumulativ (Anteil)
- ratio: Dict[str, pd.Series] # Anteil/Ausgelegt
+ contrib_cum: Dict[str, pd.Series]
+ usage_cum: Dict[str, pd.Series]
+ share_cum: Dict[str, pd.Series]
+ ratio: Dict[str, pd.Series]
def _auto_time_limits(tmin: pd.Timestamp, tmax: pd.Timestamp) -> tuple[pd.Timestamp, pd.Timestamp]:
- # +/- 5% Intervall, bei 0 Intervall fallback 30 Minuten
dt = tmax - tmin
if dt <= pd.Timedelta(0):
margin = pd.Timedelta(minutes=30)
@@ -159,28 +136,25 @@ def _auto_time_limits(tmin: pd.Timestamp, tmax: pd.Timestamp) -> tuple[pd.Timest
def _prepare_group_timeseries(df: pd.DataFrame, group: str) -> Optional[GroupTimeSeries]:
- # explode Gruppen und filtere
work = df.copy().explode("dist_groups")
work["group"] = work["dist_groups"].fillna("").astype(str).str.strip()
work = work[work["group"] == group].copy()
- work = work[pd.notna(work["Datum"])]
+ work = work[pd.notna(work["date"])]
if work.empty:
return None
- work["person"] = work["Nutzer"].astype(str).str.strip()
- work["flag"] = work["Distributionsflag"].astype(str).str.strip().str.upper()
+ work["debitor"] = work["debitor"].astype(str).str.strip()
+ work["flag"] = work["group_flag"].astype(str).str.strip().str.upper()
- participants = sorted(work["person"].unique().tolist())
+ participants = sorted(work["debitor"].unique().tolist())
- # timeline: alle Zeitpunkte der Gruppe (unique, sortiert)
- times = pd.DatetimeIndex(sorted(work["Datum"].unique()))
+ times = pd.DatetimeIndex(sorted(work["date"].unique()))
tmin, tmax = times.min(), times.max()
x0, x1 = _auto_time_limits(tmin, tmax)
times = times.union(pd.DatetimeIndex([x0, x1])).sort_values()
- # usage units (kann leer sein, oder mehrere – wir zeigen dann z.B. "km/stk")
usage_units = sorted(
work.loc[work["flag"] == "U", "unit"]
.dropna()
@@ -190,25 +164,21 @@ def _prepare_group_timeseries(df: pd.DataFrame, group: str) -> Optional[GroupTim
.tolist()
)
- # pro Person: Beiträge (C) und Nutzung (U) als kumulatives step-series auf timeline
contrib_cum: Dict[str, pd.Series] = {}
usage_cum: Dict[str, pd.Series] = {}
for p in participants:
- c = work[(work["person"] == p) & (work["flag"] == "C")].copy()
- u = work[(work["person"] == p) & (work["flag"] == "U")].copy()
+ c = work[(work["debitor"] == p) & (work["flag"] == "C")].copy()
+ u = work[(work["debitor"] == p) & (work["flag"] == "U")].copy()
- # Beiträge: nach Datum aggregieren, reindex auf timeline, kumulieren
- c_by_t = c.groupby("Datum")["value"].sum() if not c.empty else pd.Series(dtype=float)
+ c_by_t = c.groupby("date")["val"].sum() if not c.empty else pd.Series(dtype=float)
c_by_t = c_by_t.reindex(times, fill_value=0.0)
contrib_cum[p] = c_by_t.cumsum()
- # Nutzung: nach Datum aggregieren, reindex auf timeline, kumulieren
- u_by_t = u.groupby("Datum")["value"].sum() if not u.empty else pd.Series(dtype=float)
+ u_by_t = u.groupby("date")["val"].sum() if not u.empty else pd.Series(dtype=float)
u_by_t = u_by_t.reindex(times, fill_value=0.0)
usage_cum[p] = u_by_t.cumsum()
- # share über Zeit: kumulative total contributions verteilt
total_contrib = sum((contrib_cum[p] for p in participants), start=pd.Series(0.0, index=times))
total_usage = sum((usage_cum[p] for p in participants), start=pd.Series(0.0, index=times))
@@ -217,9 +187,7 @@ def _prepare_group_timeseries(df: pd.DataFrame, group: str) -> Optional[GroupTim
share_cum: Dict[str, pd.Series] = {}
if has_any_u:
- # usage-mode sobald U existiert; solange total_usage==0 => equal fallback
for p in participants:
- # share = total_contrib * usage_p / total_usage, sonst total_contrib/n
usage_p = usage_cum[p]
with np.errstate(divide="ignore", invalid="ignore"):
share_usage = total_contrib * (usage_p / total_usage.replace(0.0, np.nan))
@@ -227,7 +195,6 @@ def _prepare_group_timeseries(df: pd.DataFrame, group: str) -> Optional[GroupTim
share = share_usage.where(total_usage > 0, share_equal)
share_cum[p] = share.fillna(0.0)
else:
- # equal-mode immer
equal = total_contrib / float(n)
for p in participants:
share_cum[p] = equal
@@ -254,13 +221,6 @@ def _prepare_group_timeseries(df: pd.DataFrame, group: str) -> Optional[GroupTim
@dataclass
class GroupChartBigFrame(BigFrame):
- """
- kind:
- - 'usage_cum'
- - 'contrib_cum'
- - 'share_cum'
- - 'ratio'
- """
gts: GroupTimeSeries
kind: str
@@ -271,48 +231,45 @@ class GroupChartBigFrame(BigFrame):
formatter = mdates.ConciseDateFormatter(locator)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
- ax.xaxis.get_offset_text().set_visible(False) # <-- "2025-Dec" weg
+ ax.xaxis.get_offset_text().set_visible(False)
ax.set_xlim(self.gts.xlim_start, self.gts.xlim_end)
if self.kind == "usage_cum":
series_map = self.gts.usage_cum
unit = "/".join(self.gts.usage_units) if self.gts.usage_units else ""
- ax.set_ylabel(f"Verbrauch kumulativ {unit}".strip(), fontproperties=mono_font)
+ ax.set_ylabel(f"Usage cumulative {unit}".strip(), fontproperties=mono_font)
elif self.kind == "contrib_cum":
series_map = self.gts.contrib_cum
- ax.set_ylabel("Contributions kumulativ €", fontproperties=mono_font)
+ ax.set_ylabel("Contribution cumulative €", fontproperties=mono_font)
elif self.kind == "share_cum":
series_map = self.gts.share_cum
- ax.set_ylabel("Anteil kumulativ €", fontproperties=mono_font)
+ ax.set_ylabel("Share cumulative €", fontproperties=mono_font)
elif self.kind == "ratio":
series_map = self.gts.ratio
- ax.set_ylabel("Anteil / Ausgelegt", fontproperties=mono_font)
- ax.set_yscale("log") # <-- LOG
+ ax.set_ylabel("Share / Contribution ratio (logarithmic)", fontproperties=mono_font)
+ ax.set_yscale("log")
else:
raise ValueError(f"Unknown kind: {self.kind}")
- # Plot + Sammeln für robuste y-Limits
all_vals = []
- min_ratio = 1e-3 # „quasi 0“ für log, damit Kurven am Anfang nicht "mittendrin" starten
+ min_ratio = 1e-3
for p in self.gts.participants:
y = series_map[p].copy()
if self.kind == "ratio":
- # NaN/0/Inf behandeln, damit die Kurve von Anfang an existiert
y = y.replace([np.inf, -np.inf], np.nan)
y = y.fillna(min_ratio)
y = y.clip(lower=min_ratio)
else:
y = y.replace([np.inf, -np.inf], np.nan).fillna(0.0)
- # Steps für kumulative Kurven ist meist sauberer
ax.plot(self.gts.times, y.values, label=p, linewidth=1, drawstyle="steps-post")
v = y.values
@@ -320,7 +277,6 @@ class GroupChartBigFrame(BigFrame):
if v.size:
all_vals.append(v)
- # y-Limits so setzen, dass wirklich ALLE Werte sichtbar sind
if all_vals:
vv = np.concatenate(all_vals)
@@ -329,14 +285,14 @@ class GroupChartBigFrame(BigFrame):
if vmax <= 0:
ax.set_ylim(0, 1)
else:
- ax.set_ylim(0, vmax * 1.08) # kleiner Puffer
+ ax.set_ylim(0, vmax * 1.08)
elif self.kind == "ratio":
vpos = vv[vv > 0]
if vpos.size:
vmin = float(np.nanmin(vpos))
vmax = float(np.nanmax(vpos))
- ax.set_ylim(vmin / 1.5, vmax * 1.5) # log: multiplicative padding
+ ax.set_ylim(vmin / 1.5, vmax * 1.5)
ax.grid(True, alpha=0.2)
@@ -345,7 +301,6 @@ class GroupChartBigFrame(BigFrame):
for t in leg.get_texts():
t.set_fontproperties(mono_font)
- # Tick-Fonts monospace
for tick in ax.get_xticklabels() + ax.get_yticklabels():
tick.set_fontproperties(mono_font)
@@ -358,12 +313,11 @@ class TextFrame(Frame):
@dataclass
class PlotBigFrame(BigFrame):
- per_person: pd.DataFrame # erwartet Spalten: person, contributed, share
+ per_debitor: pd.DataFrame
def render(self, ax: Axes, mono_font: FontProperties) -> None:
- # Axes ist schon da, wir zeichnen direkt hinein
ax.axis("on")
- plot_df = self.per_person.set_index("person")[["contributed", "share"]]
+ plot_df = self.per_debitor.set_index("debitor")[["contributed", "share"]]
plot_df.plot.bar(ax=ax)
ax.tick_params(axis="x", rotation=0)
leg = ax.legend(prop=mono_font)
@@ -386,38 +340,37 @@ class GeneralModule:
mono_font = context.get("mono_font") or FontProperties(family="DejaVu Sans Mono", size=8)
- group_summary, per_person, detail = compute_group_distribution(df)
+ group_summary, per_debitor, detail = compute_group_distribution(df)
- balance = {r["person"]: float(r["balance"]) for _, r in per_person.iterrows()}
+ balance = {r["debitor"]: float(r["balance"]) for _, r in per_debitor.iterrows()}
payments = self._minimize_payments(balance)
- # ---- NEU: Textauswertung für Konsole
summary_lines = []
- summary_lines.append("General – Verteilung über Distributionsgruppen")
+ summary_lines.append("General")
summary_lines.append("")
- summary_lines.append("Gruppen:")
+ summary_lines.append("Goups:")
for _, r in group_summary.sort_values("group").iterrows():
g = r["group"]
total_c = float(r.get("total_contrib", 0.0))
u_count = int(r.get("u_count", 0))
mode = "usage" if u_count > 0 else "equal"
participants = r.get("participants", []) or []
- summary_lines.append(f" - {g}: {total_c:.2f} €; mode={mode}; teilnehmer={len(participants)}")
+ summary_lines.append(f" - {g}: {total_c:.2f} €; mode={mode}; participants={len(participants)}")
summary_lines.append("")
- summary_lines.append("Personen (Summe über alle Gruppen):")
- for _, r in per_person.sort_values("person").iterrows():
+ summary_lines.append("Debitors (total):")
+ for _, r in per_debitor.sort_values("debitor").iterrows():
summary_lines.append(
- f" - {r['person']}: ausgelegt={r['contributed']:.2f} €; anteil={r['share']:.2f} €; saldo={r['balance']:.2f} €"
+ f" - {r['debitor']}: contributed={r['contributed']:.2f} €; share={r['share']:.2f} €; balance={r['balance']:.2f} €"
)
summary_lines.append("")
- summary_lines.append("Ausgleich (minimiert):")
+ summary_lines.append("Compensation (minimized):")
if payments:
for p, r, a in payments:
summary_lines.append(f" - {p} → {r}: {a:.2f} €")
else:
- summary_lines.append(" (keine Zahlungen nötig)")
+ summary_lines.append(" (No compensation required)")
summary_text = "\n".join(summary_lines)
@@ -426,45 +379,42 @@ class GeneralModule:
pages: List[plt.Figure] = []
if want_pdf:
- frames.extend(self._make_frames(group_summary, per_person, payments))
+ frames.extend(self._make_frames(group_summary, per_debitor, payments))
- # BigFrame: Gesamt-Balkenplot bleibt (wie vorher)
bigframes.append(
PlotBigFrame(
- title="General – Ausgelegt vs Anteil (Summe über Gruppen)",
- per_person=per_person.copy(),
+ title="General – Shares vs. Contributions (total)",
+ per_debitor=per_debitor.copy(),
)
)
- # NEU: pro Distributionsgruppe 4 BigFrame-Charts
for g in sorted(group_summary["group"].unique().tolist()):
gts = _prepare_group_timeseries(df, g)
if not gts:
continue
bigframes.append(GroupChartBigFrame(
- title=f"{g} – Kumulativer Verbrauch pro Person",
+ title=f"{g} – Cumulative usage per debitor",
gts=gts,
kind="usage_cum",
))
bigframes.append(GroupChartBigFrame(
- title=f"{g} – Kumulative Contributions pro Person",
+ title=f"{g} – Cumulative contributions per debitor",
gts=gts,
kind="contrib_cum",
))
bigframes.append(GroupChartBigFrame(
- title=f"{g} – Anteil pro Person (zeitlicher Verlauf)",
+ title=f"{g} – Share per debitor",
gts=gts,
kind="share_cum",
))
bigframes.append(GroupChartBigFrame(
- title=f"{g} – Verhältnis Anteil/Ausgelegt (zeitlicher Verlauf)",
+ title=f"{g} – Share / Contribution ratio (logarithmic)",
gts=gts,
kind="ratio",
))
- # Pages: nur noch Detailseiten, keine Balkenplot-Seite mehr
- pages.extend(self._make_pages(group_summary, per_person, detail, mono_font))
+ pages.extend(self._make_pages(group_summary, per_debitor, detail, mono_font))
return ModuleResult(summary_text=summary_text, frames=frames, bigframes=bigframes, pages=pages)
@@ -493,57 +443,52 @@ class GeneralModule:
j += 1
return out
- def _make_frames(self, group_summary: pd.DataFrame, per_person: pd.DataFrame, payments: List[Tuple[str,str,float]]) -> List[Frame]:
- # Frame 1: Gruppen-Übersicht
- lines = ["Gruppenübersicht:"]
+ def _make_frames(self, group_summary: pd.DataFrame, per_debitor: pd.DataFrame, payments: List[Tuple[str,str,float]]) -> List[Frame]:
+ lines = ["Groups:"]
for _, r in group_summary.sort_values("group").iterrows():
g = r["group"]
total_c = float(r.get("total_contrib", 0.0))
u_count = int(r.get("u_count", 0))
parts = r.get("participants", [])
mode = "usage" if u_count > 0 else "equal"
- lines.append(f"- {g}: {total_c:.2f} €; mode={mode}; teilnehmer={len(parts)}")
+ lines.append(f"- {g}: {total_c:.2f} €; mode={mode}; participants={len(parts)}")
- f1 = TextFrame(title="General: Gruppen", text="\n".join(lines))
+ f1 = TextFrame(title="General: Groups", text="\n".join(lines))
- # Frame 2: Personen-Totale
- lines = ["Personen (Summe über alle Gruppen):", "Person | contributed | share | balance"]
- for _, r in per_person.iterrows():
- lines.append(f"{r['person']}: {r['contributed']:.2f} €; {r['share']:.2f} €; {r['balance']:.2f} €")
- f2 = TextFrame(title="General: Personen", text="\n".join(lines))
+ lines = ["Debitor total:", "debitor | contributed | share | balance"]
+ for _, r in per_debitor.iterrows():
+ lines.append(f"{r['debitor']} | {r['contributed']:.2f} € | {r['share']:.2f} € | {r['balance']:.2f} €")
+ f2 = TextFrame(title="General: Debitors", text="\n".join(lines))
- # Frame 3: Ausgleich
- lines = ["Ausgleich (minimiert):"]
+ lines = ["Compensation (minimized):"]
if payments:
for p, r, a in payments:
lines.append(f"{p} → {r}: {a:.2f} €")
else:
- lines.append("(keine Zahlungen nötig)")
- f3 = TextFrame(title="General: Ausgleich", text="\n".join(lines))
+ lines.append("(No compensation required)")
+ f3 = TextFrame(title="General: Compensation", text="\n".join(lines))
return [f1, f2, f3]
- def _make_pages(self, group_summary, per_person, detail, mono_font) -> List[plt.Figure]:
+ def _make_pages(self, group_summary, per_debitor, detail, mono_font) -> List[plt.Figure]:
pages: List[plt.Figure] = []
- # Textseiten: pro Gruppe Detail (ggf. mehrere)
- # Wir machen je Gruppe eine Seite, wenn es nicht zu viele sind
for g in sorted(detail["group"].unique().tolist()):
- gdet = detail[detail["group"] == g].sort_values("person")
+ gdet = detail[detail["group"] == g].sort_values("debitor")
total_c = float(group_summary[group_summary["group"] == g]["total_contrib"].iloc[0]) if (group_summary["group"] == g).any() else 0.0
u_count = int(group_summary[group_summary["group"] == g]["u_count"].iloc[0]) if (group_summary["group"] == g).any() else 0
mode = "usage" if u_count > 0 else "equal"
lines = [
- f"Gruppe: {g}",
+ f"Group: {g}",
f"Total Contribution: {total_c:.2f} €",
f"Mode: {mode}",
"",
- "Person | contributed | usage | share | balance",
+ "debitor | contributed | usage | share | balance",
]
for _, r in gdet.iterrows():
lines.append(
- f"{r['person']}: {r['contributed']:.2f} €; {r['usage']:.4f}; {r['share']:.2f} €; {r['balance']:.2f} €"
+ f"{r['debitor']} | {r['contributed']:.2f} € | {r['usage']:.4f} | {r['share']:.2f} € | {r['balance']:.2f} €"
)
fig, ax = plt.subplots(figsize=(8.27, 11.69))
@@ -551,10 +496,5 @@ class GeneralModule:
ax.text(0, 1, "\n".join(lines), va="top", ha="left", fontproperties=mono_font)
pages.append(fig)
- # Optional: Nutzungsverläufe für Gruppen mit unit "km"
- # (nur wenn U vorhanden und unit in den U-rows km ist)
- # Dafür brauchen wir zeitliche Daten → aus detail nicht möglich, also direkt aus df wäre besser.
- # Wenn du willst, ergänze ich das als eigene Seite pro km-Gruppe auf Basis der Original-DF.
-
return pages