Release
This commit is contained in:
126
bin/snowpanel-collect.py
Normal file
126
bin/snowpanel-collect.py
Normal file
@@ -0,0 +1,126 @@
|
||||
#!/usr/bin/env python3
"""SnowPanel collector: sample snowflake-proxy traffic counters.

Appends periodic samples to stats.json, maintains meta.json with the
current billing-period usage, and stops the proxy once the cap is hit.
"""
import json, subprocess, time, os, sys, math, shutil
from datetime import datetime, timezone, timedelta

# On-disk state produced by this collector (consumed by the panel/enforcer).
STATE_DIR = "/var/lib/snowpanel"
STATS = os.path.join(STATE_DIR, "stats.json")
META = os.path.join(STATE_DIR, "meta.json")
# Panel configuration: cap_gb, cap_reset_day, rate_mbps.
CFG = "/etc/snowpanel/app.json"
|
||||
|
||||
def sh(cmd):
    """Run *cmd* and return its stdout with surrounding whitespace stripped.

    stderr is discarded; a non-zero exit status is ignored (empty output).
    """
    completed = subprocess.run(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL,
        text=True,
    )
    return completed.stdout.strip()
|
||||
|
||||
def load_json(path, default):
    """Parse JSON from *path*; return *default* on any failure.

    Missing file, permission error, or malformed JSON all fall back to
    *default* — callers treat state files as best-effort.
    """
    try:
        with open(path, "r") as handle:
            return json.load(handle)
    except Exception:
        return default
|
||||
|
||||
def save_json(path, obj):
    """Atomically write *obj* as compact JSON to *path*.

    Writes to a sibling ``.tmp`` file first and renames it into place so
    readers never observe a half-written file.
    """
    scratch = path + ".tmp"
    with open(scratch, "w") as handle:
        json.dump(obj, handle, separators=(",", ":"), ensure_ascii=False)
    os.replace(scratch, path)
|
||||
|
||||
def now():
    """Current Unix time, truncated to whole seconds."""
    return int(time.time())
|
||||
|
||||
def service_bytes():
    """Return ``(rx, tx)`` cumulative IP byte counters for snowflake-proxy.

    Reads systemd's per-unit IP accounting via ``systemctl show``.  Either
    counter falls back to 0 when the property is missing or unparsable
    (e.g. accounting disabled, systemd prints ``[not set]``).

    Fix: the original used bare ``except: pass`` clauses, which also
    swallow SystemExit/KeyboardInterrupt; only ValueError from int() is
    expected here.
    """
    out = sh(["/bin/systemctl", "show",
              "-p", "IPIngressBytes", "-p", "IPEgressBytes",
              "snowflake-proxy"])
    rx = tx = 0
    for line in out.splitlines():
        key, _, value = line.partition("=")
        try:
            count = int(value or "0")
        except ValueError:
            continue  # non-numeric value: keep the 0 default
        if key == "IPIngressBytes":
            rx = count
        elif key == "IPEgressBytes":
            tx = count
    return rx, tx
|
||||
|
||||
def period_start_for_reset_day(reset_day: int) -> int:
    """Unix timestamp of the most recent monthly reset boundary (local tz).

    *reset_day* is clamped to 1..28 so the day exists in every month.  If
    the reset day has not yet arrived this month, the boundary from the
    previous month applies.
    """
    day = max(1, min(28, int(reset_day or 1)))
    current = datetime.now(timezone.utc).astimezone()
    boundary = datetime(current.year, current.month, day, tzinfo=current.tzinfo)
    if current < boundary:
        # Reset day still ahead: roll back one month, wrapping Jan -> Dec.
        if boundary.month == 1:
            boundary = boundary.replace(year=boundary.year - 1, month=12)
        else:
            boundary = boundary.replace(month=boundary.month - 1)
    return int(boundary.timestamp())
|
||||
|
||||
def main():
    """Sample counters once and refresh stats.json / meta.json.

    Intended to run periodically (e.g. from a systemd timer): appends one
    sample of the cumulative service counters, recomputes usage for the
    current billing period, and stops the proxy if the cap is exceeded.
    """
    os.makedirs(STATE_DIR, exist_ok=True)

    # Cumulative per-unit counters; they reset when the service restarts.
    rx, tx = service_bytes()
    t = now()

    # Append this sample, keeping only the most recent 5000 points.
    stats = load_json(STATS, {"data":[]})
    arr = stats.get("data", [])
    arr.append({"t": t, "read": int(rx), "written": int(tx)})
    if len(arr) > 5000:
        arr = arr[-5000:]
    stats["data"] = arr
    save_json(STATS, stats)

    # Limits from config; 0 means unlimited/disabled.
    cfg = load_json(CFG, {})
    cap_gb = int(cfg.get("cap_gb", 0))
    cap_reset_day = int(cfg.get("cap_reset_day", 1))
    rate_mbps = int(cfg.get("rate_mbps", 0))

    start_ts = period_start_for_reset_day(cap_reset_day)

    # Sum per-sample deltas inside the current period.  Negative deltas
    # (counter reset after a restart) are clamped to 0, and gaps over an
    # hour are skipped so restarts don't inflate usage.
    rx_sum = 0
    tx_sum = 0
    prev = None
    for point in arr:
        if point["t"] < start_ts:
            continue
        if prev is not None:
            dt = point["t"] - prev["t"]
            if dt <= 0 or dt > 3600:
                prev = point; continue
            dr = max(0, point["read"] - prev["read"])
            dw = max(0, point["written"] - prev["written"])
            rx_sum += dr
            tx_sum += dw
        prev = point

    total = rx_sum + tx_sum
    cap_bytes = cap_gb * 1024 * 1024 * 1024 if cap_gb > 0 else 0

    # Instantaneous throughput from the two newest samples, in Mbit/s.
    current_rate_mbps = 0.0
    if len(arr) >= 2:
        a = arr[-2]; b = arr[-1]
        dt = max(1, b["t"] - a["t"])
        dr = max(0, b["read"] - a["read"])
        dw = max(0, b["written"] - a["written"])
        current_rate_mbps = ((dr + dw) * 8.0) / dt / 1_000_000.0

    label = datetime.fromtimestamp(start_ts).strftime("%Y-%m-%d") + f" (reset day {cap_reset_day})"
    meta = {
        "start_ts": start_ts,
        "period_label": label,
        "rx": rx_sum,
        "tx": tx_sum,
        "total": total,
        "cap_bytes": cap_bytes,
        "cap_hit": False,
        "current_rate_mbps": current_rate_mbps,
        "rate_mbps": rate_mbps
    }

    # Cap enforcement: stop the proxy once the period total reaches the cap.
    if cap_bytes and total >= cap_bytes:
        meta["cap_hit"] = True
        active = sh(["/bin/systemctl","is-active","snowflake-proxy"]) == "active"
        if active:
            subprocess.run(["/usr/bin/sudo","/bin/systemctl","stop","snowflake-proxy"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

    save_json(META, meta)
|
||||
|
||||
# Run one collection pass when invoked as a script.
if __name__ == "__main__":
    main()
|
||||
227
bin/snowpanel-enforce.py
Normal file
227
bin/snowpanel-enforce.py
Normal file
@@ -0,0 +1,227 @@
|
||||
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""SnowPanel monthly cap enforcer.

Reads configured limits and recorded usage, then stops the proxy service
once the monthly traffic cap has been exceeded.
"""
import argparse
import json
import os
import sys
import time
import subprocess
from datetime import datetime, timedelta

# Config file search order: first readable file wins.
APP_JSON_CANDIDATES = ["/etc/snowpanel/app.json", "/var/lib/snowpanel/app.json"]
LIMITS_JSON_CANDIDATES = ["/etc/snowpanel/limits.json", "/var/lib/snowpanel/limits.json"]

# State files maintained by the collector.
STATE_DIR = "/var/lib/snowpanel"
META_JSON = os.path.join(STATE_DIR, "meta.json")
STATS_JSON = os.path.join(STATE_DIR, "stats.json")

SYSTEMCTL = "/bin/systemctl"
|
||||
|
||||
def pretty_bytes(n: int) -> str:
    """Human-readable size for *n* bytes, e.g. ``1536`` -> ``'1.50 KB'``.

    Plain bytes are shown as an integer; larger units with two decimals.
    The scale tops out at TB.
    """
    suffixes = ["B", "KB", "MB", "GB", "TB"]
    value = float(int(n))
    idx = 0
    while value >= 1024 and idx + 1 < len(suffixes):
        value /= 1024.0
        idx += 1
    if idx == 0:
        return f"{int(value)} {suffixes[idx]}"
    return f"{value:.2f} {suffixes[idx]}"
|
||||
|
||||
def load_first_json(paths):
    """Return parsed JSON from the first readable file in *paths*.

    Unreadable or malformed candidates are skipped silently; ``{}`` is
    returned when none succeed.
    """
    for candidate in paths:
        try:
            with open(candidate, "r", encoding="utf-8") as handle:
                return json.load(handle)
        except Exception:
            continue
    return {}
|
||||
|
||||
def save_json_atomic(path, obj):
    """Write *obj* to *path* as indented JSON via tmp-file + rename.

    The rename is atomic on POSIX, so readers never see a partial file.
    """
    scratch = f"{path}.tmp"
    with open(scratch, "w", encoding="utf-8") as handle:
        json.dump(obj, handle, ensure_ascii=False, indent=2)
    os.replace(scratch, path)
|
||||
|
||||
def period_bounds(reset_day: int):
    """Return ``(start, end)`` naive local datetimes for the current period.

    The period runs from *reset_day* of one month to the same day of the
    next month; *reset_day* is clamped to 1..28 so it exists in every month.
    """
    day = max(1, min(28, int(reset_day or 1)))
    today = datetime.now()
    year, month = today.year, today.month
    if today.day < day:
        # Boundary not reached yet this month: the period began last month.
        month -= 1
        if month == 0:
            month, year = 12, year - 1
    start = datetime(year, month, day)

    # Jump past the end of start's month, then pin to the reset day.
    rollover = start + timedelta(days=32)
    try:
        end = datetime(rollover.year, rollover.month, day)
    except ValueError:
        # Defensive only: day <= 28 exists in every month.
        end = datetime(rollover.year, rollover.month, 1)
    return start, end
|
||||
|
||||
def period_label(start: datetime, end: datetime) -> str:
    """Format a period as e.g. ``'Jan 5 → Feb 5'``.

    ``%-d`` (day without leading zero) is a glibc extension; this runs on
    Linux targets only.
    """
    fmt = "%b %-d"
    left = start.strftime(fmt)
    right = end.strftime(fmt)
    return f"{left} → {right}"
|
||||
|
||||
def load_limits():
    """Resolve ``(cap_gb, cap_reset_day, rate_mbps)`` from config files.

    Resolution order per field (first non-zero value wins):
      1. top-level keys of app.json
      2. the nested ``"limits"`` object of app.json
      3. legacy limits.json
    Finally cap_gb/rate_mbps are floored at 0 and cap_reset_day is clamped
    to 1..28.
    """
    cfg = load_first_json(APP_JSON_CANDIDATES)

    cap_gb, cap_reset_day, rate_mbps = 0, 1, 0

    if cfg:
        cap_gb = int(cfg.get("cap_gb", 0) or 0)
        cap_reset_day = int(cfg.get("cap_reset_day", 0) or 0)
        rate_mbps = int(cfg.get("rate_mbps", 0) or 0)
        nested = cfg.get("limits") or {}
        if not cap_gb:
            cap_gb = int(nested.get("cap_gb", 0) or 0)
        if not cap_reset_day:
            cap_reset_day = int(nested.get("cap_reset_day", 0) or 0)
        if not rate_mbps:
            rate_mbps = int(nested.get("rate_mbps", 0) or 0)

    if not (cap_gb and cap_reset_day and rate_mbps):
        legacy = load_first_json(LIMITS_JSON_CANDIDATES)
        if legacy:
            if not cap_gb:
                cap_gb = int(legacy.get("cap_gb", 0) or 0)
            if not cap_reset_day:
                cap_reset_day = int(legacy.get("cap_reset_day", 1) or 1)
            if not rate_mbps:
                rate_mbps = int(legacy.get("rate_mbps", 0) or 0)

    cap_gb = max(0, cap_gb)
    cap_reset_day = min(28, max(1, cap_reset_day or 1))
    rate_mbps = max(0, rate_mbps)
    return cap_gb, cap_reset_day, rate_mbps
|
||||
|
||||
def load_usage(period_start_ts: int, period_end_ts: int, verbose: bool = False):
    """Return usage for the period ``[period_start_ts, period_end_ts)``.

    Prefers the collector-maintained meta.json when its ``start_ts`` falls
    inside the requested period; otherwise re-derives rx/tx totals from
    the raw counter samples in stats.json.  In the fallback path
    ``cap_bytes``/``cap_hit`` stay zeroed — the caller supplies the cap.
    """
    usage = {
        "start_ts": period_start_ts,
        "period_label": "",
        "rx": 0,
        "tx": 0,
        "total": 0,
        "cap_bytes": 0,
        "cap_hit": False,
    }

    try:
        with open(META_JSON, "r", encoding="utf-8") as f:
            meta = json.load(f) or {}
    except Exception:
        meta = {}

    if meta:
        meta_start = int(meta.get("start_ts") or 0)
        # Trust meta.json only if it describes this very period.
        if meta_start >= period_start_ts and meta_start < period_end_ts:
            for k in ("start_ts", "period_label", "rx", "tx", "total", "cap_bytes", "cap_hit"):
                if k in meta:
                    usage[k] = meta[k]
            if verbose:
                print("Using usage from meta.json")
            return usage

    if verbose:
        print("meta.json missing/out-of-period, estimating from stats.json")

    try:
        with open(STATS_JSON, "r", encoding="utf-8") as f:
            s = json.load(f) or {}
        rows = s.get("data") or []
    except Exception:
        rows = []

    # Samples hold cumulative counters: sum clamped deltas between
    # consecutive in-period samples.  Counter resets produce negative
    # deltas, dropped via max(0, ...).
    rows = sorted(rows, key=lambda r: int(r.get("t", 0)))
    prev = None
    total_rx = 0
    total_tx = 0
    for r in rows:
        t = int(r.get("t", 0))
        if t < period_start_ts or t >= period_end_ts:
            continue
        if prev is not None:
            dr = max(0, int(r.get("read", 0)) - int(prev.get("read", 0)))
            dw = max(0, int(r.get("written", 0)) - int(prev.get("written", 0)))
            total_rx += dr
            total_tx += dw
        prev = r

    usage["rx"] = total_rx
    usage["tx"] = total_tx
    usage["total"] = total_rx + total_tx
    usage["start_ts"] = period_start_ts
    usage["period_label"] = period_label(datetime.fromtimestamp(period_start_ts),
                                         datetime.fromtimestamp(period_end_ts))
    usage["cap_bytes"] = 0
    usage["cap_hit"] = False
    return usage
|
||||
|
||||
def update_meta_cap_hit(cap_hit: bool, period_start_ts: int, period_end_ts: int, verbose: bool = False):
    """Persist the ``cap_hit`` flag into meta.json (best effort, never raises).

    If meta.json describes a different period, its ``start_ts`` and
    ``period_label`` are rewritten to the current period first.
    """
    try:
        meta = {}
        if os.path.isfile(META_JSON):
            with open(META_JSON, "r", encoding="utf-8") as f:
                meta = json.load(f) or {}

        meta_start = int(meta.get("start_ts") or 0)
        if not (period_start_ts <= meta_start < period_end_ts):
            meta["start_ts"] = period_start_ts
            meta["period_label"] = period_label(datetime.fromtimestamp(period_start_ts),
                                                datetime.fromtimestamp(period_end_ts))
        meta["cap_hit"] = bool(cap_hit)

        save_json_atomic(META_JSON, meta)
        if verbose:
            print(f"meta.json updated: cap_hit={cap_hit}")
    except Exception as e:
        # Deliberately best-effort: enforcement must not die on a corrupt
        # or unwritable state file.
        if verbose:
            print(f"meta.json update skipped: {e}")
|
||||
|
||||
def stop_service(service: str, verbose: bool = False, dry_run: bool = False):
    """Stop *service* via systemctl; with *dry_run* only report the command."""
    cmd = [SYSTEMCTL, "stop", service]
    if verbose:
        prefix = "(dry-run) " if dry_run else ""
        print("RUN:", prefix + " ".join(cmd))
    if not dry_run:
        subprocess.run(cmd, check=False)
|
||||
|
||||
def main():
    """Parse CLI options, compare period usage against the cap, enforce it."""
    ap = argparse.ArgumentParser(description="SnowPanel monthly cap enforcer")
    ap.add_argument("-v", "--verbose", action="store_true", help="verbose output")
    ap.add_argument("--dry-run", action="store_true", help="do not stop the service")
    ap.add_argument("--service", default="snowflake-proxy", help="systemd service name")
    args = ap.parse_args()

    # Resolve the current billing period from the configured reset day.
    cap_gb, reset_day, rate_mbps = load_limits()
    start_dt, end_dt = period_bounds(reset_day)
    start_ts = int(start_dt.timestamp())
    end_ts = int(end_dt.timestamp())

    usage = load_usage(start_ts, end_ts, verbose=args.verbose)
    # Prefer the collector's recorded cap; fall back to the configured one.
    cap_bytes = usage.get("cap_bytes") or (cap_gb * (1024 ** 3))
    total = int(usage.get("total") or 0)

    if args.verbose:
        print(f"Limits: cap_gb={cap_gb}, reset_day={reset_day}")
        print(f"Usage : total={total}B cap={cap_bytes}B start_ts={start_ts}")
        if cap_bytes > 0:
            pct = min(100, round(total * 100 / cap_bytes)) if cap_bytes else 0
            print(f" {pretty_bytes(total)} / {pretty_bytes(cap_bytes)} ({pct}%)")
        else:
            print(" Unlimited cap")

    if cap_bytes > 0 and total >= cap_bytes:
        # Record cap_hit before stopping so the panel reflects the state
        # even if the stop itself fails.
        update_meta_cap_hit(True, start_ts, end_ts, verbose=args.verbose)
        if args.verbose:
            print(f"Cap reached — stopping {args.service}.")
        stop_service(args.service, verbose=args.verbose, dry_run=args.dry_run)
    else:
        update_meta_cap_hit(False, start_ts, end_ts, verbose=args.verbose)
        if args.verbose:
            print("Cap not reached — no action.")
|
||||
|
||||
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Conventional exit status for SIGINT (128 + 2).
        sys.exit(130)
|
||||
33
bin/snowpanel-logdump
Normal file
33
bin/snowpanel-logdump
Normal file
@@ -0,0 +1,33 @@
|
||||
#!/usr/bin/env bash
# snowpanel-logdump [LINES] [LEVEL] — print recent snowflake-proxy log lines.
# Tries several journalctl query styles, then a `systemctl status` scrape.
# Always exits 0, printing nothing if no logs are accessible.
set -Eeuo pipefail
LINES="${1:-500}"
LEVEL="${2:-info}"
# Sanitize arguments: LINES must be numeric, LEVEL a known syslog priority.
case "$LINES" in ''|*[!0-9]* ) LINES=500 ;; esac
case "$LEVEL" in debug|info|notice|warning|err) ;; * ) LEVEL=info ;; esac

# Drop the "timestamp host unit[pid]:" prefix, keeping only the message.
strip(){ sed -E 's/^.*\]:[[:space:]]*//'; }

# Application logs: prefer the syslog identifier, fall back to _COMM.
# Over-fetch (3x) because the streams are merged and trimmed below.
app_logs=""
if out="$(journalctl -t snowflake-proxy -p "$LEVEL" -n $((LINES*3)) -o short-iso --no-pager 2>/dev/null)"; then
    app_logs="$out"
elif out="$(journalctl _COMM=snowflake-proxy -p "$LEVEL" -n $((LINES*3)) -o short-iso --no-pager 2>/dev/null)"; then
    app_logs="$out"
fi

# Unit lifecycle events (start/stop/restart/failure) from the service unit.
sys_lines=""
if out="$(journalctl -u snowflake-proxy.service -p "$LEVEL" -n $((LINES*6)) -o short-iso --no-pager 2>/dev/null | grep -E ' (Started|Stopped|Stopping|Restarted|Reloaded|Failed) snowflake-proxy\.service|snowflake-proxy\.service: Consumed ')"; then
    sys_lines="$out"
fi

# Merge, drop blanks, sort chronologically (ISO timestamps sort lexically),
# then keep the newest LINES entries.
combined="$(printf "%s\n%s\n" "${app_logs:-}" "${sys_lines:-}" | sed '/^[[:space:]]*$/d' | sort)"
if [[ -n "$combined" ]]; then
    echo "$combined" | tail -n "$LINES" | strip
    exit 0
fi

# Last resort: scrape proxy-related lines out of `systemctl status`.
if out="$(systemctl status snowflake-proxy --no-pager -l 2>/dev/null | grep -E ' snowflake-proxy\[[0-9]+\]:| (Started|Stopped|Stopping|Restarted|Reloaded|Failed) snowflake-proxy\.service|snowflake-proxy\.service: Consumed ')"; then
    echo "$out" | tail -n "$LINES" | strip
    exit 0
fi

# Nothing available (e.g. no journal access): succeed with empty output.
exit 0
|
||||
143
bin/snowpanel-shaper
Normal file
143
bin/snowpanel-shaper
Normal file
@@ -0,0 +1,143 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
read_cfg() {
    # Print the configured rate limit in Mbps (0 = unlimited / not set).
    # Scans the candidate config files in order and takes the first file
    # providing a usable "rate_mbps" (top-level or under "limits").
    python3 - "$@" <<'PY'
import json,sys
cands=["/var/lib/snowpanel/app.json","/etc/snowpanel/app.json","/etc/snowpanel/limits.json","/var/lib/snowpanel/limits.json"]
rate=0
for p in cands:
    try:
        with open(p,"r",encoding="utf-8") as f:
            j=json.load(f)
        if isinstance(j,dict):
            v=j.get("rate_mbps") or (j.get("limits",{}) if isinstance(j.get("limits"),dict) else {}).get("rate_mbps")
            if v is not None:
                rate=int(v)
                break
    except Exception:
        pass
print(rate)
PY
}
|
||||
|
||||
get_iface() {
    # Print the primary egress interface: the IPv4 default-route device,
    # else the IPv6 one.  Prints empty and returns 1 if neither exists.
    local ifs=() d4 d6
    d4=$(ip -o -4 route show to default 2>/dev/null | awk '{print $5}' | head -n1 || true)
    d6=$(ip -o -6 route show ::/0 2>/dev/null | awk '{print $5}' | head -n1 || true)
    [[ -n "${d4:-}" ]] && ifs+=("$d4")
    [[ -n "${d6:-}" && "${d6:-}" != "${d4:-}" ]] && ifs+=("$d6")
    [[ ${#ifs[@]} -eq 0 ]] && { echo ""; return 1; }
    echo "${ifs[0]}"
}
|
||||
|
||||
get_uid() {
    # Print the UID the snowflake-proxy service runs as, trying in order:
    # the unit's User= setting, a local "snowflake" account, then the
    # running MainPID's /proc status.  Prints empty if all fail.
    local u pid
    u=$(systemctl show -p User --value snowflake-proxy 2>/dev/null || true)
    if [[ -n "${u:-}" ]]; then id -u "$u" 2>/dev/null || true; return 0; fi
    if id -u snowflake &>/dev/null; then id -u snowflake; return 0; fi
    pid=$(systemctl show -p MainPID --value snowflake-proxy 2>/dev/null || true)
    if [[ -n "${pid:-}" && -r "/proc/$pid/status" ]]; then awk '/^Uid:/{print $2; exit}' "/proc/$pid/status"; return 0; fi
    echo ""
}
|
||||
|
||||
ipt_add() {
    # Idempotently ensure: the mangle chain exists, OUTPUT jumps to it,
    # and the chain holds the given rule (-C check before -A append).
    # NOTE(review): $rule is expanded via eval and word-split; rule
    # strings must not contain shell metacharacters.
    local bin="$1" chain="$2" rule="$3"
    "$bin" -t mangle -N "$chain" 2>/dev/null || true
    "$bin" -t mangle -C OUTPUT -j "$chain" 2>/dev/null || "$bin" -t mangle -A OUTPUT -j "$chain"
    eval "$bin -t mangle -C $chain $rule" 2>/dev/null || eval "$bin -t mangle -A $chain $rule"
}
|
||||
ipt_in_add() {
    # Idempotently hook PREROUTING into $chain and restore the connection
    # mark onto incoming packets so ingress shaping can match them.
    local bin="$1" chain="$2"
    "$bin" -t mangle -N "$chain" 2>/dev/null || true
    "$bin" -t mangle -C PREROUTING -j "$chain" 2>/dev/null || "$bin" -t mangle -A PREROUTING -j "$chain"
    "$bin" -t mangle -C "$chain" -j CONNMARK --restore-mark 2>/dev/null || "$bin" -t mangle -A "$chain" -j CONNMARK --restore-mark
}
|
||||
ipt_clear() {
    # Remove the SnowPanel mangle chains for the given iptables binary.
    # Fix: the original loop ignored its variable $c and re-ran both
    # hard-coded detach loops on every iteration; each chain is now
    # detached from its own hook exactly once.
    local bin="$1" c hook
    for c in SNOWPANEL SNOWPANEL_IN; do
        # Each chain hangs off a different built-in hook.
        hook=OUTPUT
        [[ "$c" == SNOWPANEL_IN ]] && hook=PREROUTING
        # Delete every jump into the chain (it may appear multiple times).
        while "$bin" -t mangle -D "$hook" -j "$c" 2>/dev/null; do :; done
        "$bin" -t mangle -F "$c" 2>/dev/null || true
        "$bin" -t mangle -X "$c" 2>/dev/null || true
    done
}
|
||||
|
||||
tc_clear() {
    # Tear down shaping: root + ingress qdiscs on the interface, plus the
    # ifb0 helper device used for ingress redirection.
    local ifc="$1"
    tc qdisc del dev "$ifc" root 2>/dev/null || true
    tc qdisc del dev "$ifc" ingress 2>/dev/null || true
    tc qdisc del dev ifb0 root 2>/dev/null || true
    tc qdisc del dev ifb0 ingress 2>/dev/null || true
    ip link set ifb0 down 2>/dev/null || true
    ip link delete ifb0 type ifb 2>/dev/null || true
}
|
||||
|
||||
tc_apply() {
    # Install HTB shaping on egress interface $1, and on ingress via an
    # ifb0 mirror, limiting fw-mark 0x1 traffic to $2 kbit per direction.
    # Classes use add-then-change so re-running is idempotent.
    local ifc="$1" rate_kbit="$2"
    # Egress tree (handle 1:): 1:10 = shaped (marked) class, 1:30 = default.
    tc qdisc replace dev "$ifc" root handle 1: htb default 30
    tc class add dev "$ifc" parent 1: classid 1:1 htb rate 10000000kbit ceil 10000000kbit 2>/dev/null || \
        tc class change dev "$ifc" parent 1: classid 1:1 htb rate 10000000kbit ceil 10000000kbit
    tc class add dev "$ifc" parent 1:1 classid 1:10 htb rate "${rate_kbit}kbit" ceil "${rate_kbit}kbit" 2>/dev/null || \
        tc class change dev "$ifc" parent 1:1 classid 1:10 htb rate "${rate_kbit}kbit" ceil "${rate_kbit}kbit"
    tc class add dev "$ifc" parent 1:1 classid 1:30 htb rate 9000000kbit ceil 10000000kbit 2>/dev/null || \
        tc class change dev "$ifc" parent 1:1 classid 1:30 htb rate 9000000kbit ceil 10000000kbit
    # Steer packets carrying fw mark 0x1 into the shaped class.
    tc filter replace dev "$ifc" parent 1: protocol all handle 0x1 fw flowid 1:10

    # Ingress: redirect all inbound traffic to ifb0, then shape there.
    modprobe ifb numifbs=1 2>/dev/null || true
    ip link add ifb0 type ifb 2>/dev/null || true
    ip link set dev ifb0 up
    tc qdisc replace dev "$ifc" ingress
    tc filter replace dev "$ifc" parent ffff: protocol all u32 match u32 0 0 action mirred egress redirect dev ifb0

    # Same HTB layout on ifb0 (handle 2:) for the mirrored ingress.
    tc qdisc replace dev ifb0 root handle 2: htb default 30
    tc class add dev ifb0 parent 2: classid 2:1 htb rate 10000000kbit ceil 10000000kbit 2>/dev/null || \
        tc class change dev ifb0 parent 2: classid 2:1 htb rate 10000000kbit ceil 10000000kbit
    tc class add dev ifb0 parent 2:1 classid 2:10 htb rate "${rate_kbit}kbit" ceil "${rate_kbit}kbit" 2>/dev/null || \
        tc class change dev ifb0 parent 2:1 classid 2:10 htb rate "${rate_kbit}kbit" ceil "${rate_kbit}kbit"
    tc class add dev ifb0 parent 2:1 classid 2:30 htb rate 9000000kbit ceil 10000000kbit 2>/dev/null || \
        tc class change dev ifb0 parent 2:1 classid 2:30 htb rate 9000000kbit ceil 10000000kbit
    tc filter replace dev ifb0 parent 2: protocol all handle 0x1 fw flowid 2:10
}
|
||||
|
||||
apply() {
    # Read config, mark proxy-owned traffic with fw mark 0x1, and shape it
    # to the configured rate (split evenly between egress and ingress).
    local rate uid ifc per_kbit
    rate=$(read_cfg)
    uid=$(get_uid)
    ifc=$(get_iface || true)
    # Always start from a clean slate.
    ipt_clear iptables || true
    ipt_clear ip6tables || true
    # Unlimited rate or unresolvable interface/UID: remove shaping.
    # NOTE: `exit` (not `return`) — this terminates the whole script.
    if [[ -z "${ifc:-}" || -z "${uid:-}" || "$rate" -le 0 ]]; then
        tc_clear "${ifc:-eth0}" || true
        exit 0
    fi
    # Half the Mbps budget per direction, converted to kbit, floor 64.
    per_kbit=$(( rate * 1000 / 2 ))
    [[ $per_kbit -lt 64 ]] && per_kbit=64

    # Mark packets owned by the service UID and save the mark on the
    # connection so replies can be matched on ingress (CONNMARK restore).
    ipt_add iptables SNOWPANEL "-m owner --uid-owner $uid -j MARK --set-xmark 0x1/0x1"
    ipt_add iptables SNOWPANEL "-m owner --uid-owner $uid -j CONNMARK --save-mark"
    ipt_in_add iptables SNOWPANEL_IN
    if command -v ip6tables >/dev/null 2>&1; then
        ipt_add ip6tables SNOWPANEL "-m owner --uid-owner $uid -j MARK --set-xmark 0x1/0x1"
        ipt_add ip6tables SNOWPANEL "-m owner --uid-owner $uid -j CONNMARK --save-mark"
        ipt_in_add ip6tables SNOWPANEL_IN
    fi

    tc_clear "$ifc" || true
    tc_apply "$ifc" "$per_kbit"
}
|
||||
|
||||
clear_all() {
    # Remove all SnowPanel marking and shaping state (both IP families).
    local ifc
    ifc=$(get_iface || echo eth0)
    ipt_clear iptables || true
    ipt_clear ip6tables || true
    tc_clear "$ifc" || true
}
|
||||
|
||||
# CLI dispatch: "apply" (default) installs marking + shaping, "clear"
# removes it; any unknown command falls back to apply.
cmd="${1:-apply}"
case "$cmd" in
    apply) apply ;;
    clear) clear_all ;;
    *) apply ;;
esac
|
||||
Reference in New Issue
Block a user