You can use the script below for any RHEL change as a pre- & post-check.

It was mainly designed for pre- and post-reboot checks of a server: it takes a snapshot before and after the change, compares the two, and reports the differences between them.

#!/usr/bin/env bash
# Linux Healthcheck / Snapshot Compare
# -----------------------------------------------------
# Primary Usage : ./linux_healthcheck.sh [ -h | -i | -c | -C | -d | -g | -l | -q | -u | -v | --tgz-content ]
# Secondary Usage : ./linux_healthcheck.sh [ start [ <change_number> ] | stop | compare | clean ]
#
# Key behavior:
# - Uses /tmp/UNIX_Change
# - "stop" compares NEW (snapshot 0) vs OLDEST archive (highest suffix .N)
# - If any item is KO, print OLD & NEW lines.
#
# Updates in this version:
# - Adds OS/KERNEL/BOOT compare items:
# OS_redhat_release, KERNEL_uname_r, BOOT_grubby_default_kernel
# - Adds "collect-only" items:
# BOOT_grubby_info_all, NET_nmcli_con_show, NET_nmcli_eth0
# - Adds AUTOFS collection (NO manifest.txt):
# AUTOFS_auto_master + AUTOFS_maps (hash lines for map files)
# If /etc/auto.master is missing -> writes "N/A" to both files (stable/no noise)
# - Adds SYS_sysctl_imp (filtered important sysctls) for comparison
# Keeps SYS_sysctl_full as collect-only
# - Fix: compare alternatives_python file name
# - Keeps TGZ deep compare (metadata + optional content diff), skips binary content diffs
VERSION="1.2"
# ---- Defaults ----
# Values using ${VAR:-...} can be overridden from the environment.
SNAPHIST="${SNAPHIST:-53}" # History length
RUN_DIR_DEFAULT="/tmp/UNIX_Change" # Default directory for change snapshots
RUN_DIR="$RUN_DIR_DEFAULT"
LOG_FILE="${LOG_FILE:-/var/log/server_config_snapshot.log}"
INSTALL_NAME="Linux config snapshot"
CRON_SCHEDULE="40 0 * * 0" # weekly Sunday 00:40
CRON_TAG="# ${INSTALL_NAME}" # marker comment used to find our crontab entry
# Option A: one-shot flag to enable tgz content diff
FORCE_TGZ_CONTENT=0
# ---- Formatting ----
SPACE=21 # label column width used by msg()
# Timestamp for console/log lines (YYYY/MM/DD HH:MM:SS).
ts() { date '+%Y/%m/%d %H:%M:%S'; }
# Console output helper. Usage forms:
#   msg -d TEXT            : timestamped detail line
#   msg -e / -w / -s TEXT  : error / warning / success line
#   msg -i LABEL TEXT      : informational "LABEL : TEXT" line
#   msg LABEL OK|KO|N/A    : aligned compare-item status line
#   msg LABEL OLD|NEW STR  : each non-empty line of STR prefixed with
#                            "LABEL :OLD:" / "LABEL :NEW:" (diff detail)
#   msg LABEL TEXT         : generic aligned "LABEL : TEXT" line
# SPACE (global) sets the label column width for alignment.
msg() {
local mode="${1:-}"
case "$mode" in
-d) printf "%-${SPACE}s : %s\n" "$(ts)" "${2:-}" ;;
-e) printf "%${SPACE}s : ERROR : %s\n" "! ! ! ERROR ! ! !" "${2:-}" ;;
-w) printf "%${SPACE}s : Warning : %s\n" "Warning" "${2:-}" ;;
-s) printf "%${SPACE}s : Success : %s\n" "Success" "${2:-}" ;;
-i) printf "%${SPACE}s : %s\n" "${2:-}" "${3:-}" ;;
*)
local label="${1:-}" status="${2:-}"
case "$status" in
OK) printf "%${SPACE}s : OK\n" "$label" ;;
KO) printf "%${SPACE}s : KO\n" "$label" ;;
N/A) printf "%${SPACE}s : N/A\n" "$label" ;;
OLD)
# $3 holds multi-line diff text; print each non-empty line tagged OLD.
while IFS= read -r line; do
[[ -z "$line" ]] && continue
printf "%${SPACE}s :OLD: %s\n" "$label" "$line"
done <<< "${3:-}"
;;
NEW)
# Same as OLD but for lines that appear only in the new snapshot.
while IFS= read -r line; do
[[ -z "$line" ]] && continue
printf "%${SPACE}s :NEW: %s\n" "$label" "$line"
done <<< "${3:-}"
;;
*)
printf "%${SPACE}s : %s\n" "$label" "$status"
;;
esac
;;
esac
}
# Thin wrapper so call sites read uniformly; mkdir -p is idempotent.
mkdirp() { mkdir -p "$1"; }
# Shift the rotation chain one step for snapshot file $1:
#   f.N -> f.(N+1) for N = SNAPHIST..1, then the bare file -> f.1.
# No-op (returns 0) when the bare file does not exist.
rotate_file() {
  local target="$1"
  [[ -f "$target" ]] || return 0
  local idx
  for (( idx = SNAPHIST; idx >= 1; idx-- )); do
    if [[ -f "${target}.${idx}" ]]; then
      mv -f "${target}.${idx}" "${target}.$((idx + 1))"
    fi
  done
  mv -f "$target" "${target}.1"
}
# Delete rotated snapshots of $1 whose numeric suffix exceeds the
# retention limit (SNAPHIST+1). Uses a glob over the actual files
# instead of probing a fixed window of suffixes, so any leftover
# high-numbered archive is cleaned up too (the old seq-based loop
# only checked suffixes max+1 .. max+200) and no seq fork is needed.
rotate_prune() {
  local f="$1"
  local max=$((SNAPHIST + 1))
  local p n
  for p in "${f}."[0-9]*; do
    [[ -e "$p" ]] || continue      # glob matched nothing
    n="${p##*.}"
    [[ "$n" =~ ^[0-9]+$ ]] || continue
    (( n > max )) && rm -f -- "$p"
  done
  return 0
}
# Short hostname; falls back to full hostname if -s is unsupported.
host_short() { hostname -s 2>/dev/null || hostname; }
# Best-effort append of a timestamped line to $LOG_FILE.
# Never fails (unwritable log must not break the healthcheck).
log_line() {
  local log_dir
  log_dir="$(dirname "$LOG_FILE")"
  mkdir -p "$log_dir" || true
  { printf '%s : %s\n' "$(ts)" "$*" >> "$LOG_FILE"; } 2>/dev/null || true
}
# Abort the script unless running as root (cron install/uninstall
# touches root's crontab).
require_root_for_cron() {
if [[ "$(id -u)" -ne 0 ]]; then
msg -e "Only root can (install/uninstall) cron entries."
exit 1
fi
}
# --- normalize helpers (reduces false KO due to CRLF / trailing spaces) ---
normalize_for_diff() {
# stdin filter: strip CRs, trailing whitespace and blank lines so
# cosmetic differences do not produce KO results.
tr -d '\r' | sed -e 's/[[:space:]]\+$//' -e '/^$/d'
}
# stdin filter: drop the "# CMD:" / "# DATE:" header lines collectors prepend.
strip_headers() { sed '/^# /d'; }
# -------- Snapshot content collection --------
# Run an arbitrary shell command and capture its stdout into $3,
# prefixed with a "# CMD:" / "# DATE:" header. stderr is discarded and
# a failing command still produces a (possibly empty) file.
#   $1 - label (kept for call-site readability, not used here)
#   $2 - command string (run via bash -lc)
#   $3 - output file
collect_one() {
  local _label="$1" command_string="$2" target="$3"
  {
    printf '# CMD: %s\n' "${command_string}"
    printf '# DATE: %s\n' "$(date -Iseconds)"
    bash -lc "${command_string}" 2>/dev/null || true
  } > "$target"
}
# Time zone
collect_timezone() {
# Record the system time zone (from timedatectl) for comparison.
local outfile="$1"
{
# NOTE(review): this header string advertises only the awk stage; the
# actual pipeline below also trims leading spaces with sed. Header is
# informational only, so the mismatch is cosmetic.
echo "# CMD: timedatectl | awk -F: \"/Time zone/ {print \$2}\""
echo "# DATE: $(date -Iseconds)"
if command -v timedatectl >/dev/null 2>&1; then
timedatectl | awk -F':' '/Time zone/ {print $2}' | sed 's/^ *//g'
else
echo "MISSING(timedatectl)"
fi
} > "$outfile"
}
# OS/KERNEL/BOOT compares
# Capture /etc/redhat-release (compare item OS_redhat_release).
# Writes a stable MISSING marker when the file does not exist.
collect_os_release() {
  local target="$1"
  {
    printf '# CMD: cat /etc/redhat-release\n'
    printf '# DATE: %s\n' "$(date -Iseconds)"
    if [[ -f /etc/redhat-release ]]; then
      cat /etc/redhat-release 2>/dev/null || true
    else
      printf 'MISSING(/etc/redhat-release)\n'
    fi
  } > "$target"
}
# Record the running kernel release (compare item KERNEL_uname_r).
collect_uname_r() {
  local target="$1"
  {
    printf '# CMD: uname -r\n'
    printf '# DATE: %s\n' "$(date -Iseconds)"
    uname -r 2>/dev/null || true
  } > "$target"
}
collect_grubby_default_kernel() {
# Capture the default boot kernel (compare item BOOT_grubby_default_kernel).
# Stable MISSING marker keeps the compare quiet on hosts without grubby.
local outfile="$1"
{
echo "# CMD: grubby --default-kernel"
echo "# DATE: $(date -Iseconds)"
if command -v grubby >/dev/null 2>&1; then
grubby --default-kernel 2>/dev/null || true
else
echo "MISSING(grubby)"
fi
} > "$outfile"
}
# Collect-only: grubby info all
collect_grubby_info_all() {
# Collect-only: full boot-entry dump for manual inspection
# (too noisy to be used as a compare item).
local outfile="$1"
{
echo "# CMD: grubby --info=ALL"
echo "# DATE: $(date -Iseconds)"
if command -v grubby >/dev/null 2>&1; then
grubby --info=ALL 2>/dev/null || true
else
echo "MISSING(grubby)"
fi
} > "$outfile"
}
# Collect-only: nmcli summaries
collect_nmcli_con_show() {
# Collect-only: terse NetworkManager connection list (name/uuid/type/device).
local outfile="$1"
{
echo "# CMD: nmcli -t -f NAME,UUID,TYPE,DEVICE con show"
echo "# DATE: $(date -Iseconds)"
if command -v nmcli >/dev/null 2>&1; then
nmcli -t -f NAME,UUID,TYPE,DEVICE con show 2>/dev/null || true
else
echo "MISSING(nmcli)"
fi
} > "$outfile"
}
collect_nmcli_dev_eth0() {
# Collect-only: IPv4 state of interface eth0.
# NOTE(review): "eth0" is hard-coded; hosts using predictable NIC names
# (ens*, eno*, enp*) will capture nothing here — confirm the fleet's
# naming convention before relying on this file.
local outfile="$1"
{
echo "# CMD: nmcli -g GENERAL.STATE,IP4.ADDRESS,IP4.GATEWAY,IP4.DNS dev show eth0"
echo "# DATE: $(date -Iseconds)"
if command -v nmcli >/dev/null 2>&1; then
nmcli -g GENERAL.STATE,IP4.ADDRESS,IP4.GATEWAY,IP4.DNS dev show eth0 2>/dev/null || true
else
echo "MISSING(nmcli)"
fi
} > "$outfile"
}
# Sysctl important (filtered) for compare; keep full as collect-only
collect_sysctl_important() {
# Compare item SYS_sysctl_imp: sysctl values filtered by the allow-list
# below, sorted for stable diffs. Keys absent on this kernel simply do
# not appear (no MISSING noise). The full dump stays collect-only in
# SYS_sysctl_full.
local outfile="$1"
# Only record keys that exist on this kernel. Missing keys are intentionally ignored.
# Add/remove keys here as needed.
local keys=(
# kernel
kernel.core_pattern
kernel.core_pipe_limit
kernel.core_uses_pid
kernel.dmesg_restrict
kernel.kptr_restrict
kernel.panic
kernel.panic_on_oops
kernel.pid_max
kernel.randomize_va_space
kernel.sysrq
kernel.sysctl_writes_strict
kernel.unprivileged_bpf_disabled
kernel.perf_event_paranoid
kernel.tainted
kernel.hung_task_timeout_secs
kernel.hung_task_warnings
kernel.hardlockup_panic
kernel.watchdog
kernel.watchdog_thresh
kernel.io_uring_disabled
# user-added (shared memory / semaphores)
kernel.shmall
kernel.shmmax
kernel.shmmni
kernel.sem
# fs
fs.file-max
fs.nr_open
fs.aio-max-nr
fs.inotify.max_user_watches
fs.inotify.max_user_instances
fs.inotify.max_queued_events
fs.epoll.max_user_watches
fs.suid_dumpable
fs.protected_hardlinks
fs.protected_symlinks
fs.protected_fifos
fs.protected_regular
fs.lease-break-time
# vm
vm.swappiness
vm.dirty_background_ratio
vm.dirty_ratio
vm.dirty_expire_centisecs
vm.dirty_writeback_centisecs
vm.overcommit_memory
vm.overcommit_ratio
vm.max_map_count
vm.min_free_kbytes
vm.panic_on_oom
vm.oom_dump_tasks
vm.oom_kill_allocating_task
vm.vfs_cache_pressure
vm.watermark_boost_factor
vm.watermark_scale_factor
vm.zone_reclaim_mode
# user-added (hugepages)
vm.nr_hugepages
vm.nr_hugepages_mempolicy
# net.core
net.core.somaxconn
net.core.netdev_max_backlog
net.core.rmem_max
net.core.wmem_max
net.core.rmem_default
net.core.wmem_default
net.core.optmem_max
net.core.default_qdisc
# net.ipv4
net.ipv4.ip_forward
net.ipv4.ip_local_port_range
net.ipv4.ip_nonlocal_bind
net.ipv4.tcp_syncookies
net.ipv4.tcp_fin_timeout
net.ipv4.tcp_keepalive_time
net.ipv4.tcp_keepalive_intvl
net.ipv4.tcp_keepalive_probes
net.ipv4.tcp_timestamps
net.ipv4.tcp_sack
net.ipv4.tcp_window_scaling
net.ipv4.tcp_rmem
net.ipv4.tcp_wmem
net.ipv4.tcp_max_syn_backlog
net.ipv4.tcp_tw_reuse
net.ipv4.tcp_congestion_control
net.ipv4.tcp_mtu_probing
net.ipv4.icmp_echo_ignore_broadcasts
net.ipv4.conf.all.rp_filter
net.ipv4.conf.default.rp_filter
net.ipv4.conf.all.accept_redirects
net.ipv4.conf.default.accept_redirects
net.ipv4.conf.all.send_redirects
net.ipv4.conf.default.send_redirects
net.ipv4.conf.all.accept_source_route
net.ipv4.conf.default.accept_source_route
net.ipv4.conf.all.log_martians
net.ipv4.conf.default.log_martians
# net.ipv6
net.ipv6.conf.all.disable_ipv6
net.ipv6.conf.default.disable_ipv6
net.ipv6.conf.all.accept_ra
net.ipv6.conf.default.accept_ra
net.ipv6.conf.all.forwarding
net.ipv6.conf.default.forwarding
# conntrack
net.netfilter.nf_conntrack_max
net.netfilter.nf_conntrack_buckets
)
{
echo "# CMD: sysctl -a | filtered important keys (allow-list)"
echo "# DATE: $(date -Iseconds)"
if command -v sysctl >/dev/null 2>&1; then
# Build allow-list for awk (newline separated)
local allowlist
allowlist="$(printf '%s\n' "${keys[@]}")"
# Normalize CR / trailing blanks, keep "key = value" lines whose key is
# in the allow-list, sort for a stable diff.
sysctl -a 2>/dev/null \
| tr -d '\r' \
| sed -e 's/[[:space:]]\+$//' \
| awk -F' = ' -v ALLOW="$allowlist" '
BEGIN{
n=split(ALLOW, a, "\n");
for(i=1;i<=n;i++){
if(a[i]!="") ok[a[i]]=1;
}
}
{
k=$1;
gsub(/[[:space:]]+$/, "", k);
if(k in ok) print $0;
}
' \
| sort
else
echo "MISSING(sysctl)"
fi
} > "$outfile"
}
# AUTOFS collection (NO manifest.txt)
# - Always creates:
# AUTOFS_auto_master
# AUTOFS_maps (hash lines for map files referenced by auto.master that exist and are non-empty)
# - If /etc/auto.master missing => write "N/A" into both files
collect_autofs() {
# Capture autofs configuration into two stable compare files:
#   $1 - copy of /etc/auto.master (or an N/A marker)
#   $2 - "FILE: <path> sha256=<hash>" line per existing, non-empty map
#        file referenced by auto.master (or N/A / NONE markers)
# Writing "N/A" when auto.master is absent keeps compares noise-free.
local out_master="$1"
local out_maps="$2"
if [[ ! -f /etc/auto.master ]]; then
{
echo "# CMD: AUTOFS (auto.master missing)"
echo "# DATE: $(date -Iseconds)"
echo "N/A (/etc/auto.master not found)"
} > "$out_master"
{
echo "# CMD: AUTOFS maps (auto.master missing)"
echo "# DATE: $(date -Iseconds)"
echo "N/A (/etc/auto.master not found)"
} > "$out_maps"
return 0
fi
{
echo "# CMD: cat /etc/auto.master"
echo "# DATE: $(date -Iseconds)"
cat /etc/auto.master 2>/dev/null || true
} > "$out_master"
# Extract 2nd field (map file) ignoring comments/blank/include lines;
# keep only files that exist and are non-empty.
local files=()
while IFS= read -r mf; do
[[ -z "$mf" ]] && continue
# ignore include lines like "+auto.master"
[[ "$mf" == +* ]] && continue
# relative map names are resolved under /etc (autofs convention)
if [[ "$mf" != /* ]]; then
mf="/etc/${mf}"
fi
[[ -s "$mf" ]] || continue
files+=("$mf")
done < <(
awk '
$0 ~ /^[[:space:]]*#/ {next}
NF < 2 {next}
{print $2}
' /etc/auto.master 2>/dev/null | sed 's/[[:space:]].*$//' | sed '/^$/d' | sort -u
)
{
echo "# CMD: AUTOFS map files referenced by /etc/auto.master (existing + non-empty only)"
echo "# DATE: $(date -Iseconds)"
if [[ "${#files[@]}" -eq 0 ]]; then
echo "NONE (no existing/non-empty map files referenced by /etc/auto.master)"
else
local f
for f in "${files[@]}"; do
# stable: path + sha256 (content hash avoids copying map files around)
if command -v sha256sum >/dev/null 2>&1; then
printf "FILE: %s sha256=%s\n" "$f" "$(sha256sum "$f" 2>/dev/null | awk '{print $1}')"
else
printf "FILE: %s sha256=MISSING(sha256sum)\n" "$f"
fi
done
fi
} > "$out_maps"
}
# Azure IMDS (only if Virtualization: microsoft)
collect_azure_imds_if_microsoft() {
# Query the Azure Instance Metadata Service (IMDS) and store the raw
# JSON ($1) and a pretty-printed copy ($2). Only runs when hostnamectl
# reports Microsoft virtualization; otherwise both files get a stable
# SKIPPED marker.
# NOTE(review): the detection greps for "Virtualization:" and
# "microsoft" anywhere in the hostnamectl output, not specifically on
# the Virtualization line — a hostname containing "microsoft" would
# also match; confirm that is acceptable.
local out_raw="$1"
local out_pretty="$2"
local hc=""
if command -v hostnamectl >/dev/null 2>&1; then
hc="$(hostnamectl 2>/dev/null || true)"
fi
if echo "$hc" | grep -qi "Virtualization:" && echo "$hc" | grep -qi "microsoft"; then
{
echo "# CMD: curl --noproxy 169.254.169.254 -s -H Metadata:true http://169.254.169.254/metadata/instance?api-version=2021-02-01"
echo "# DATE: $(date -Iseconds)"
if command -v curl >/dev/null 2>&1; then
# --noproxy: IMDS is link-local and must never go through a proxy.
curl --noproxy 169.254.169.254 --max-time 10 -s -H Metadata:true \
"http://169.254.169.254/metadata/instance?api-version=2021-02-01" 2>/dev/null || true
else
echo "MISSING(curl)"
fi
} > "$out_raw"
{
echo "# CMD: curl ... | python -m json.tool"
echo "# DATE: $(date -Iseconds)"
if command -v curl >/dev/null 2>&1; then
# NOTE(review): this branch prefers "python" over "python3" — the
# reverse of generate_azure_files_from_imds_pretty; on RHEL 8+ only
# python3 usually exists. Verify the intended order.
if command -v python >/dev/null 2>&1; then
curl --noproxy 169.254.169.254 --max-time 10 -s -H Metadata:true \
"http://169.254.169.254/metadata/instance?api-version=2021-02-01" 2>/dev/null \
| python -m json.tool 2>/dev/null || true
elif command -v python3 >/dev/null 2>&1; then
curl --noproxy 169.254.169.254 --max-time 10 -s -H Metadata:true \
"http://169.254.169.254/metadata/instance?api-version=2021-02-01" 2>/dev/null \
| python3 -m json.tool 2>/dev/null || true
else
echo "MISSING(python/python3)"
fi
else
echo "MISSING(curl)"
fi
} > "$out_pretty"
else
printf "SKIPPED (Virtualization not microsoft)\n" > "$out_raw"
printf "SKIPPED (Virtualization not microsoft)\n" > "$out_pretty"
fi
}
# --- FS_lsld collection from df (no findmnt files) ---
collect_lsld_mountpoints_from_df() {
# Compare item FS_lsld: "ls -ld" of every real mountpoint listed in the
# already-collected df -hT file ($1); output goes to $2.
#   - skips "/" and pseudo filesystems (tmpfs, proc, cgroup, overlay...)
#   - skips system mount trees (/proc, /sys, /dev, /run, /snap, ...)
# NOTE(review): mountpoints containing spaces would break the $7 field
# assumption of "df -hT" — assumed not to occur on these hosts.
local df_file="$1"
local outfile="$2"
{
echo "# CMD: ls -ld (mountpoints from df -hT)"
echo "# DATE: $(date -Iseconds)"
if [[ -f "$df_file" ]]; then
strip_headers < "$df_file" \
| awk '
$1=="Filesystem" {next}
NF<7 {next}
{
type=$2; mp=$7
if (mp=="/" || mp=="") next
if (type ~ /^(tmpfs|devtmpfs|proc|sysfs|cgroup|cgroup2|overlay)$/) next
if (mp ~ /^\/(proc|sys|dev|run|var\/run|var\/lib\/containers|snap|cgroup|cgroup2)/) next
print mp
}
' | sort -u | while read -r mp; do
# sed drops the SELinux "."/ACL "+" flag after the mode string so a
# relabel alone does not flag a KO.
ls -ld "$mp" 2>/dev/null |sed -E 's/^([dlcbps-][rwxstST-]{9})[.+]/\1/' || echo "MISSING $mp"
done
else
echo "MISSING(FS_df)"
fi
} > "$outfile"
}
# -------- Azure parsing (from AZ_imds_pretty) --------
# Derive the comparable AZ_* files from the pretty-printed IMDS JSON.
#   $1 - path to AZ_imds_pretty (collector output: "# " headers + JSON)
#   $2 - output directory for AZ_Compute / AZ_Image_Reference / AZ_Tags /
#        AZ_Network / AZ_Storage
# On any failure all five files carry a PARSE_ERROR marker; when the
# collector skipped IMDS (non-Azure host) its SKIPPED line is passed
# through verbatim so compares stay quiet.
generate_azure_files_from_imds_pretty() {
  local imds_pretty="$1"
  local outdir="$2"
  local out_compute="${outdir}/AZ_Compute"
  local out_image="${outdir}/AZ_Image_Reference"
  local out_tags="${outdir}/AZ_Tags"
  local out_net="${outdir}/AZ_Network"
  local out_storage="${outdir}/AZ_Storage"
  local out
  # Start from empty files so stale content never survives a failed parse.
  for out in "$out_compute" "$out_image" "$out_tags" "$out_net" "$out_storage"; do
    : > "$out"
  done
  local py=""
  if command -v python3 >/dev/null 2>&1; then
    py="python3"
  elif command -v python >/dev/null 2>&1; then
    py="python"
  else
    for out in "$out_compute" "$out_image" "$out_tags" "$out_net" "$out_storage"; do
      echo "PARSE_ERROR(MISSING python/python3)" > "$out"
    done
    return 0
  fi
  if [[ ! -f "$imds_pretty" ]]; then
    for out in "$out_compute" "$out_image" "$out_tags" "$out_net" "$out_storage"; do
      echo "PARSE_ERROR(MISSING AZ_imds_pretty)" > "$out"
    done
    return 0
  fi
  "$py" - "$imds_pretty" "$out_compute" "$out_image" "$out_tags" "$out_net" "$out_storage" <<'PY'
import sys, json, re

imds_pretty = sys.argv[1]
out_compute = sys.argv[2]
out_image = sys.argv[3]
out_tags = sys.argv[4]
out_net = sys.argv[5]
out_storage = sys.argv[6]

def writef(path, text):
    # Best-effort write; never raise (snapshotting must not abort).
    try:
        f = open(path, "w")
        try:
            f.write(text)
        finally:
            f.close()
    except Exception:
        pass

def write_all(text):
    # Same marker text into all five output files.
    for p in (out_compute, out_image, out_tags, out_net, out_storage):
        writef(p, text)

def safe_get(d, path, default=""):
    # Walk nested dicts/lists by key or index; return default on any miss.
    cur = d
    for p in path:
        try:
            if isinstance(cur, list):
                cur = cur[p]
            else:
                cur = cur.get(p, default)
        except Exception:
            return default
    return cur if cur is not None else default

def strip_headers(text):
    # Drop the collector's "# " header lines and blank lines.
    lines = []
    for ln in text.splitlines():
        if ln.startswith("# "):
            continue
        if ln.strip() == "":
            continue
        lines.append(ln)
    return "\n".join(lines).strip()

raw = ""
try:
    raw = open(imds_pretty, "r").read()
except Exception as e:
    write_all("PARSE_ERROR({0})\n".format(e))
    sys.exit(0)

json_text = strip_headers(raw)
if not json_text.startswith("{"):
    msg = "PARSE_ERROR(Expecting JSON object)\n"
    # FIX: the collector writes "SKIPPED (Virtualization not microsoft)"
    # with a space before the parenthesis, so the previous substring test
    # for "SKIPPED(" never matched and skips were misreported as parse
    # errors. Match on the prefix instead.
    if json_text.startswith("SKIPPED"):
        msg = json_text.strip() + "\n"
    write_all(msg)
    sys.exit(0)

try:
    data = json.loads(json_text)
except Exception as e:
    write_all("PARSE_ERROR({0})\n".format(e))
    sys.exit(0)

compute = data.get("compute", {}) or {}
network = data.get("network", {}) or {}

# ---------------- AZ_Compute ----------------
subid = safe_get(compute, ["subscriptionId"], "")
comp_name = safe_get(compute, ["osProfile", "computerName"], "")
vm_name = safe_get(compute, ["name"], "")
vmss = safe_get(compute, ["vmScaleSetName"], "")
vmsize = safe_get(compute, ["vmSize"], "")
rg = safe_get(compute, ["resourceGroupName"], "")
loc = safe_get(compute, ["location"], "")
zone = safe_get(compute, ["zone"], "")
txt = []
txt.append("Subscription ID: {0}".format(subid))
txt.append("Computer Name: {0}".format(comp_name))
txt.append("VM Scale Set Name: {0}".format(vmss))
txt.append("Virtual Machines: {0}".format(vm_name))
txt.append("VM Size: {0}".format(vmsize))
txt.append("Resource Group Name: {0}".format(rg))
txt.append("Location: {0}".format(loc))
txt.append("zone: {0}".format(zone))
writef(out_compute, "\n".join(txt) + "\n")

# ---------------- AZ_Image_Reference ----------------
img_id = safe_get(compute, ["storageProfile", "imageReference", "id"], "")
img_sub = ""
img_rg = ""
img_gallery = ""
img_image = ""
if isinstance(img_id, str) and img_id:
    m = re.search(r"/subscriptions/([^/]+)/resourceGroups/([^/]+)/providers/[^/]+/galleries/([^/]+)/images/([^/]+)", img_id, re.IGNORECASE)
    if m:
        img_sub, img_rg, img_gallery, img_image = m.group(1), m.group(2), m.group(3), m.group(4)
txt = []
txt.append("Subscription ID: {0}".format(img_sub))
txt.append("Resource Group: {0}".format(img_rg))
if img_gallery or img_image:
    txt.append("Galleries: {0}/images/{1}".format(img_gallery, img_image))
else:
    txt.append("Galleries: ")
writef(out_image, "\n".join(txt) + "\n")

# ---------------- AZ_Tags ----------------
tags_list = safe_get(compute, ["tagsList"], [])
names = []
vals = []
if isinstance(tags_list, list):
    for it in tags_list:
        if not isinstance(it, dict):
            continue
        n = it.get("name", "")
        v = it.get("value", "")
        names.append(str(n))
        vals.append("" if v is None else str(v))

def csv_escape(s):
    # Minimal CSV quoting: wrap and double embedded quotes when needed.
    if s is None:
        return ""
    s = str(s)
    if "," in s or '"' in s:
        s = s.replace('"', '""')
        return '"' + s + '"'
    return s

line1 = "name," + ",".join([csv_escape(x) for x in names]) + "\n"
line2 = "value," + ",".join([csv_escape(x) for x in vals]) + "\n"
writef(out_tags, line1 + line2)

# ---------------- AZ_Network ----------------
priv = ""
pub = ""
cidr = ""
iface0 = ""
try:
    iface0 = network.get("interface", [])[0]
except Exception:
    iface0 = ""
if isinstance(iface0, dict):
    try:
        ip0 = iface0.get("ipv4", {}).get("ipAddress", [])[0]
        priv = ip0.get("privateIpAddress", "") or ""
        pub = ip0.get("publicIpAddress", "") or ""
    except Exception:
        pass
    try:
        sn0 = iface0.get("ipv4", {}).get("subnet", [])[0]
        addr = sn0.get("address", "") or ""
        pref = sn0.get("prefix", "") or ""
        if addr and pref:
            cidr = "{0}/{1}".format(addr, pref)
    except Exception:
        pass
txt = []
txt.append("Private IP Address: {0}".format(priv))
txt.append("Public IP Address: {0}".format(pub))
txt.append("Subnet CIDR: {0}".format(cidr))
writef(out_net, "\n".join(txt) + "\n")

# ---------------- AZ_Storage ----------------
sp = safe_get(compute, ["storageProfile"], {}) or {}
# OS Disk
osd = safe_get(sp, ["osDisk"], {}) or {}
os_name = osd.get("name", "") if isinstance(osd, dict) else ""
os_size = osd.get("diskSizeGB", "") if isinstance(osd, dict) else ""
os_cache = osd.get("caching", "") if isinstance(osd, dict) else ""
os_md = osd.get("managedDisk", {}) if isinstance(osd, dict) else {}
os_md_id = os_md.get("id", "") if isinstance(os_md, dict) else ""
os_sku = os_md.get("storageAccountType", "") if isinstance(os_md, dict) else ""
lines = []
lines.append("OS Disk Name: {0}".format(os_name))
lines.append("OS Disk SizeGB: {0}".format(os_size))
lines.append("OS Disk Caching: {0}".format(os_cache))
lines.append("OS Disk StorageAccountType: {0}".format(os_sku))
lines.append("OS Disk ManagedDiskId: {0}".format(os_md_id))
lines.append("")
# Data Disks, sorted by LUN for a stable compare
dds = safe_get(sp, ["dataDisks"], [])
if not isinstance(dds, list):
    dds = []

def lun_key(x):
    # Unparsable LUNs sort last.
    try:
        return int(x.get("lun", "9999"))
    except Exception:
        return 9999

dds_sorted = sorted([d for d in dds if isinstance(d, dict)], key=lun_key)
lines.append("Data Disks Count: {0}".format(len(dds_sorted)))
for d in dds_sorted:
    lun = d.get("lun", "")
    name = d.get("name", "")
    size = d.get("diskSizeGB", "")
    cache = d.get("caching", "")
    md = d.get("managedDisk", {}) if isinstance(d.get("managedDisk", {}), dict) else {}
    md_id = md.get("id", "") if isinstance(md, dict) else ""
    sku = md.get("storageAccountType", "") if isinstance(md, dict) else ""
    lines.append("LUN {0}: Name={1}; SizeGB={2}; Caching={3}; StorageAccountType={4}; ManagedDiskId={5}".format(
        lun, name, size, cache, sku, md_id
    ))
# Resource Disk size (if present)
rd = safe_get(sp, ["resourceDisk"], {}) or {}
rd_size = rd.get("size", "") if isinstance(rd, dict) else ""
if rd_size != "":
    lines.append("")
    lines.append("Resource Disk Size: {0}".format(rd_size))
writef(out_storage, "\n".join(lines) + "\n")
PY
}
# Collect every snapshot item into directory $1, one file per item.
# Files suffixed _full / _info_all and the NET_*/LVM_*display captures
# are collect-only context; the remaining files are compare items.
collect_snapshot_dir() {
  local outdir="$1"
  mkdirp "$outdir"
  # OS/KERNEL/BOOT (COMPARE)
  collect_os_release "$outdir/OS_redhat_release"
  collect_uname_r "$outdir/KERNEL_uname_r"
  collect_grubby_default_kernel "$outdir/BOOT_grubby_default_kernel"
  # ETC
  [[ -f /etc/hosts ]] && cp -f /etc/hosts "$outdir/ETC_hosts" || printf "MISSING\n" > "$outdir/ETC_hosts"
  [[ -f /etc/resolv.conf ]] && cp -f /etc/resolv.conf "$outdir/ETC_resolv" || printf "MISSING\n" > "$outdir/ETC_resolv"
  [[ -f /etc/fstab ]] && cp -f /etc/fstab "$outdir/ETC_fstab" || printf "MISSING\n" > "$outdir/ETC_fstab"
  [[ -f /etc/passwd ]] && cp -f /etc/passwd "$outdir/ETC_passwd" || printf "MISSING\n" > "$outdir/ETC_passwd"
  [[ -f /etc/group ]] && cp -f /etc/group "$outdir/ETC_group" || printf "MISSING\n" > "$outdir/ETC_group"
  [[ -f /etc/shadow ]] && cp -f /etc/shadow "$outdir/ETC_shadow" || printf "MISSING\n" > "$outdir/ETC_shadow"
  [[ -f /etc/sudoers ]] && cp -f /etc/sudoers "$outdir/ETC_sudoers" || printf "MISSING\n" > "$outdir/ETC_sudoers"
  if [[ -d /etc/sudoers.d ]]; then
    (cd /etc && tar -czf "$outdir/ETC_sudoers_d.tgz" sudoers.d 2>/dev/null || true)
  else
    printf "MISSING\n" > "$outdir/ETC_sudoers_d.tgz"
  fi
  # Java and Python alternatives
  collect_one "alternatives_java" "alternatives --list | egrep '^java|^jre'" "$outdir/alternatives_java"
  collect_one "alternatives_python" "alternatives --list | egrep '^python'" "$outdir/alternatives_python"
  # LIMITS (ulimits)
  [[ -f /etc/security/limits.conf ]] && cp -f /etc/security/limits.conf "$outdir/SEC_limits_conf" || printf "MISSING\n" > "$outdir/SEC_limits_conf"
  if [[ -d /etc/security/limits.d ]]; then
    (cd /etc/security && tar -czf "$outdir/SEC_limits_d.tgz" limits.d 2>/dev/null || true)
  else
    printf "MISSING\n" > "$outdir/SEC_limits_d.tgz"
  fi
  # SSH
  [[ -f /etc/ssh/ssh_config ]] && cp -f /etc/ssh/ssh_config "$outdir/SSH_etc_ssh_config" || printf "MISSING\n" > "$outdir/SSH_etc_ssh_config"
  [[ -f /etc/ssh/sshd_config ]] && cp -f /etc/ssh/sshd_config "$outdir/SSH_etc_sshd_config" || printf "MISSING\n" > "$outdir/SSH_etc_sshd_config"
  # SSSD
  [[ -f /etc/sssd/sssd.conf ]] && cp -f /etc/sssd/sssd.conf "$outdir/LDAP_etc_sssd_conf" || printf "MISSING\n" > "$outdir/LDAP_etc_sssd_conf"
  # AUTOFS (COMPARE) - no manifest
  collect_autofs "$outdir/AUTOFS_auto_master" "$outdir/AUTOFS_maps"
  # CRON
  collect_one "CRON_root" "crontab -l" "$outdir/CRON_root"
  [[ -d /var/spool/cron ]] && (tar -czf "$outdir/CRON_spool.tgz" /var/spool/cron 2>/dev/null || true) || printf "MISSING\n" > "$outdir/CRON_spool.tgz"
  [[ -d /etc/cron.d ]] && (tar -czf "$outdir/CRON_cron_d.tgz" /etc/cron.d 2>/dev/null || true) || printf "MISSING\n" > "$outdir/CRON_cron_d.tgz"
  # FS (FS_df is still collected; not compared)
  collect_one "FS_df" "df -hT" "$outdir/FS_df"
  collect_one "FS_mount" "mount" "$outdir/FS_mount"
  collect_lsld_mountpoints_from_df "$outdir/FS_df" "$outdir/FS_lsld"
  # HW (stable lscpu compare)
  collect_one "HW_lscpu" "lscpu 2>/dev/null | egrep -i '^(Architecture|CPU\\(s\\)|Thread\\(s\\) per core|Core\\(s\\) per socket|Socket\\(s\\)|Vendor ID|Model name|Hypervisor vendor|Virtualization type):' | sed -e 's/[[:space:]]\\+/ /g' -e 's/ :/:/g' |tr -s '[:space:]'" "$outdir/HW_lscpu"
  collect_one "HW_lscpu_full" "lscpu" "$outdir/HW_lscpu_full"
  collect_one "HW_lsblk" "lsblk -a -o KNAME,TYPE,SIZE,MODEL,SERIAL |grep -w disk |tr -s '[:space:]'" "$outdir/HW_lsblk"
  collect_one "HW_dmidecode" "dmidecode" "$outdir/HW_dmidecode"
  collect_one "HW_dmidecode_sys" "dmidecode -t1 |awk '/System Information/{flag=1;next} flag' |sed 's/^[[:space:]]*//g' |tr -s '[:space:]'" "$outdir/HW_dmidecode_sys"
  collect_one "HW_dmidecode_base" "dmidecode -t2 |awk '/Base Board Information/{flag=1;next} flag' |sed 's/^[[:space:]]*//g' |tr -s '[:space:]'" "$outdir/HW_dmidecode_base"
  collect_one "HW_dmidecode_chas" "dmidecode -t3 |awk '/Chassis Information/{flag=1;next} flag' |sed 's/^[[:space:]]*//g' |tr -s '[:space:]'" "$outdir/HW_dmidecode_chas"
  collect_one "HW_dmidecode_proc" "dmidecode -t4 |egrep 'Version|Signature|Speed' |egrep -v 'Unknown|None' |sort |uniq |sed 's/^[[:space:]]*//g' |tr -s '[:space:]'" "$outdir/HW_dmidecode_proc"
  collect_one "HW_dmidecode_mem" "dmidecode -t17 |egrep 'Size' |egrep -v 'No Module Installed' |sed 's/^[[:space:]]*//g' |tr -s '[:space:]'" "$outdir/HW_dmidecode_mem"
  # PKG
  collect_one "PKG_list" "rpm -qa --qf '%{NAME} %{VERSION}-%{RELEASE} %{ARCH}\n' | sort" "$outdir/PKG_list"
  # SYSCTL: compare filtered; collect full separately
  collect_sysctl_important "$outdir/SYS_sysctl_imp"
  collect_one "SYS_sysctl_full" "sysctl -a" "$outdir/SYS_sysctl_full"
  # SERVICES
  collect_one "SVC_systemctl_failed" "systemctl list-units --type=service --state=failed --no-legend --no-pager --plain 2>/dev/null |tr -s '[:space:]'" "$outdir/SVC_systemctl_failed"
  collect_one "SVC_systemctl_running" "systemctl list-units --type=service --state=running --no-legend --no-pager --plain 2>/dev/null |tr -s '[:space:]'" "$outdir/SVC_systemctl_running"
  # FIX: $1/$2 must be escaped here — unescaped they were expanded by the
  # shell to this function's own arguments inside the double-quoted
  # command string, corrupting the awk program (compare the correctly
  # escaped LVM_*_stable commands below).
  collect_one "SVC_systemctl_enabled" "systemctl list-unit-files --type=service --no-legend --no-pager |awk '{print \$1, \$2}' |sort" "$outdir/SVC_systemctl_enabled"
  collect_one "SVC_chkconfig" "chkconfig --list" "$outdir/SVC_chkconfig"
  # BOOT / KERNEL (collect-only + compare already handled above)
  collect_one "BOOT_files" "ls -l /boot |sed -E 's/^([dlcbps-][rwxstST-]{9})[.+]/\\1/' |egrep -v 'total|lost\\+' |tr -s '[:space:]'" "$outdir/BOOT_files"
  collect_one "KERNEL_cmdline" "cat /proc/cmdline" "$outdir/KERNEL_cmdline"
  collect_one "KERNEL_lsmod" "lsmod" "$outdir/KERNEL_lsmod"
  collect_grubby_info_all "$outdir/BOOT_grubby_info_all" # collect-only
  # NetworkManager collect-only
  collect_nmcli_con_show "$outdir/NET_nmcli_con_show" # collect-only
  collect_nmcli_dev_eth0 "$outdir/NET_nmcli_eth0" # collect-only
  # NFS exports
  [[ -f /etc/exports ]] && cp -f /etc/exports "$outdir/ETC_exports" || printf "MISSING\n" > "$outdir/ETC_exports"
  collect_one "NFS_exportfs_v" "exportfs -v" "$outdir/NFS_exportfs_v"
  # Security / misc compares
  collect_one "SEC_getenforce" "getenforce" "$outdir/SEC_getenforce"
  collect_timezone "$outdir/SYS_timezone"
  collect_one "SYS_umask" "umask" "$outdir/SYS_umask"
  collect_one "SEC_sestatus" "sestatus" "$outdir/SEC_sestatus"
  # ---------------- Capture Data Only (No comparison) ----------------
  collect_one "LVM_blkid" "blkid" "$outdir/LVM_blkid"
  collect_one "LVM_pvs_full" "pvs" "$outdir/LVM_pvs_full"
  collect_one "LVM_vgs_full" "vgs" "$outdir/LVM_vgs_full"
  collect_one "LVM_lvs_full" "lvs" "$outdir/LVM_lvs_full"
  collect_one "LVM_pvdisplay" "pvdisplay" "$outdir/LVM_pvdisplay"
  collect_one "LVM_vgdisplay" "vgdisplay" "$outdir/LVM_vgdisplay"
  collect_one "LVM_lvdisplay" "lvdisplay" "$outdir/LVM_lvdisplay"
  # Stable LVM capture (for compare)
  collect_one "LVM_pvs_stable" "pvs |awk 'NR==1{next} {print \$1\" \"\$2\" \"\$5}'" "$outdir/LVM_pvs_stable"
  collect_one "LVM_vgs_stable" "vgs |awk 'NR==1{next} {print \$1\" \"\$2\" \"\$3\" \"\$6}'" "$outdir/LVM_vgs_stable"
  collect_one "LVM_lvs_stable" "lvs |awk 'NR==1{next} {print \$1\" \"\$2\" \"\$3\" \"\$4}'" "$outdir/LVM_lvs_stable"
  collect_one "NET_ifconfig" "ifconfig 2>/dev/null |grep inet |egrep -v 'inet6|127.0.0.1' |sed 's/^ *//g' |tr -s '[:space:]' || true" "$outdir/NET_ifconfig"
  collect_one "NET_ip_addr" "ip addr 2>/dev/null |grep inet |egrep -v 'inet6|127.0.0.1' |sed 's/^ *//g' |tr -s '[:space:]' || true" "$outdir/NET_ip_addr"
  collect_one "NET_ip_route" "ip route 2>/dev/null |sed 's/^ *//g' |tr -s '[:space:]' || true" "$outdir/NET_ip_route"
  collect_one "NET_netstat_nr" "netstat -nr |tr -s '[:space:]'" "$outdir/NET_netstat_nr"
  collect_one "NET_netstat_s" "netstat -s |tr -s '[:space:]'" "$outdir/NET_netstat_s"
  collect_one "LSOF_listen" "lsof -i -sTCP:LISTEN" "$outdir/LSOF_listen"
  collect_one "LSOF_established" "lsof -i -sTCP:ESTABLISHED" "$outdir/LSOF_established"
  collect_one "LSOF_PNI_listen" "lsof -Pni -sTCP:LISTEN" "$outdir/LSOF_PNI_listen"
  collect_one "LSOF_PNI_estab" "lsof -Pni -sTCP:ESTABLISHED" "$outdir/LSOF_PNI_estab"
  collect_one "NTP_ntpq_p" "ntpq -p" "$outdir/NTP_ntpq_p"
  collect_one "SYS_hostnamectl" "hostnamectl" "$outdir/SYS_hostnamectl"
  collect_one "SYS_uptime" "uptime" "$outdir/SYS_uptime"
  collect_one "SYS_who_b" "who -b" "$outdir/SYS_who_b"
  collect_one "SYS_last" "last" "$outdir/SYS_last"
  collect_one "SYS_ps_ef" "ps -ef" "$outdir/SYS_ps_ef"
  collect_one "PROC_cmdline" "cat /proc/cmdline" "$outdir/PROC_cmdline"
  collect_one "PROC_meminfo" "cat /proc/meminfo" "$outdir/PROC_meminfo"
  # Azure IMDS (raw + pretty)
  collect_azure_imds_if_microsoft "$outdir/AZ_imds_raw" "$outdir/AZ_imds_pretty"
  # derive comparison files from AZ_imds_pretty
  generate_azure_files_from_imds_pretty "$outdir/AZ_imds_pretty" "$outdir"
}
create_snapshot() {
# Build a fresh snapshot tarball at $RUN_DIR/vitals_<host>.tgz,
# rotating existing archives first (bare file -> .1 -> .2 ...).
mkdirp "$RUN_DIR"
local host base
host="$(host_short)"
base="${RUN_DIR}/vitals_${host}.tgz"
rotate_file "$base"
rotate_prune "$base"
local stamp workdir inner
stamp="$(date +%s)"
# Collect into a temp workdir so the tarball contains a single
# dated top-level folder (vitals_<host>_<epoch>).
workdir="$(mktemp -d "${RUN_DIR}/vitals_${host}_${stamp}_XXXX")"
inner="${workdir}/vitals_${host}_${stamp}"
mkdirp "$inner"
collect_snapshot_dir "$inner"
tar -czf "$base" -C "$workdir" "$(basename "$inner")"
rm -rf "$workdir"
msg -s "Snapshot created: $base"
log_line "Snapshot created: $base"
}
# -------- Snapshot discovery / listing --------
# Print all snapshot archives for this host, newest first, as
#   <index> : <YYYY_Mon_DD_HH:MM:SS> : <path>
# (index 0 = current snapshot). Warns and returns 1 when none exist.
list_archives() {
  local host base
  host="$(host_short)"
  base="${RUN_DIR}/vitals_${host}.tgz"
  local files=() f
  [[ -f "$base" ]] && files+=("$base")
  while IFS= read -r f; do files+=("$f"); done < <(ls -1 "${base}."[0-9]* 2>/dev/null || true)
  if [[ "${#files[@]}" -eq 0 ]]; then
    msg -w "No snapshots found in ${RUN_DIR}."
    return 1
  fi
  # Sort newest-first by mtime. The previous implementation spliced each
  # filename into a bash -lc one-liner via xargs -I{}, which both forked
  # a shell per file and broke (or worse, executed filename content) on
  # names containing quotes or $ — build the mtime/name pairs directly.
  mapfile -t files < <(
    for f in "${files[@]}"; do
      printf '%s\t%s\n' "$(stat -c %Y "$f" 2>/dev/null || echo 0)" "$f"
    done | sort -rn | cut -f2-
  )
  local idx=0 epoch human
  for f in "${files[@]}"; do
    epoch="$(stat -c %Y "$f" 2>/dev/null || echo 0)"
    human="$(date -d "@$epoch" '+%Y_%b_%d_%H:%M:%S' 2>/dev/null || date '+%Y_%b_%d_%H:%M:%S')"
    printf "%${SPACE}s : %s : %s\n" "$idx" "$human" "$f"
    idx=$((idx+1))
  done
}
get_oldest_archive_in_dir() {
# Echo the OLDEST snapshot for this host: rotation pushes older files
# to higher .N suffixes, so oldest = highest numeric suffix. Falls back
# to the bare archive; returns 1 when nothing exists.
local host base
host="$(host_short)"
base="${RUN_DIR}/vitals_${host}.tgz"
local max="" f n
shopt -s nullglob # an unmatched glob below must expand to nothing
for f in "${base}."[0-9]*; do
n="${f##*.}"
if [[ "$n" =~ ^[0-9]+$ ]]; then
if [[ -z "$max" || "$n" -gt "$max" ]]; then
max="$n"
fi
fi
done
shopt -u nullglob
if [[ -n "$max" && -f "${base}.${max}" ]]; then
echo "${base}.${max}"
return 0
fi
[[ -f "$base" ]] && echo "$base" && return 0
return 1
}
# Echo the NEWEST snapshot (the unrotated base archive) for this host;
# return 1 when it does not exist.
get_newest_archive() {
  local short_host archive
  short_host="$(host_short)"
  archive="${RUN_DIR}/vitals_${short_host}.tgz"
  if [[ -f "$archive" ]]; then
    echo "$archive"
    return 0
  fi
  return 1
}
# -------- Compare helpers --------
# Unpack snapshot archive $1 into directory $2 (created first).
extract_archive() {
  local archive="$1"
  local target="$2"
  mkdirp "$target"
  tar -xzf "$archive" -C "$target"
}
# Print the lines unique to $1 (OLD), a "----SPLIT----" marker, then the
# lines unique to $2 (NEW), capped at 200 lines each; returns 1 when the
# files are identical. The unified-diff "---"/"+++" file headers are the
# first two output lines, so they are dropped with tail instead of the
# previous ^-[^-] / ^\+[^\+] patterns, which also silently discarded
# genuine content lines beginning with '-' or '+' (diff renders those as
# "--x" / "++x").
diff_old_new_lines() {
  local oldf="$1" newf="$2"
  local delta old_only new_only
  # Run diff once; empty output means no differences.
  delta="$(diff -u "$oldf" "$newf" 2>/dev/null | tail -n +3 || true)"
  old_only="$(printf '%s\n' "$delta" | awk '/^-/ {sub(/^-/, "", $0); print $0}' | head -200 || true)"
  new_only="$(printf '%s\n' "$delta" | awk '/^\+/ {sub(/^\+/, "", $0); print $0}' | head -200 || true)"
  [[ -z "$old_only" && -z "$new_only" ]] && return 1
  printf "%s\n" "$old_only" | sed '/^$/d' || true
  echo "----SPLIT----"
  printf "%s\n" "$new_only" | sed '/^$/d' || true
}
# ---------------- TGZ deep compare (Added/Removed/Modified + optional content diffs) ----------------
extract_tgz_to() {
  # Best-effort extraction of tarball $1 into $2 (created if absent).
  # Errors are deliberately swallowed so a corrupt archive never aborts
  # a compare run; caller falls back to whole-file checksums.
  local archive="$1" dest="$2"
  mkdir -p "$dest"
  tar -xzf "$archive" -C "$dest" 2>/dev/null || true
}
hash_tree() {
  # Inventory a directory tree as pipe-delimited, sortable records:
  #   F|path|mode|uid|gid|sha256   regular files
  #   D|path|mode|uid|gid|         directories (no trailing field)
  #   L|path|mode|uid|gid|target   symlinks (target of the link)
  # Paths are relative to $1; silently returns when $1 is not a directory.
  local root="$1"
  [[ -d "$root" ]] || return 0
  local rel
  # Regular files: metadata plus content hash.
  (cd "$root" && find . -xdev -type f -print0 2>/dev/null) \
    | while IFS= read -r -d '' rel; do
        rel="${rel#./}"
        printf "F|%s|%s|%s|%s|%s\n" "$rel" \
          "$(stat -c '%a' "$root/$rel" 2>/dev/null || echo "?")" \
          "$(stat -c '%u' "$root/$rel" 2>/dev/null || echo "?")" \
          "$(stat -c '%g' "$root/$rel" 2>/dev/null || echo "?")" \
          "$(sha256sum "$root/$rel" 2>/dev/null | awk '{print $1}' || echo "?")"
      done
  # Directories: metadata only (the root "." itself is skipped).
  (cd "$root" && find . -xdev -type d -print0 2>/dev/null) \
    | while IFS= read -r -d '' rel; do
        rel="${rel#./}"
        [[ -z "$rel" || "$rel" == "." ]] && continue
        printf "D|%s|%s|%s|%s|\n" "$rel" \
          "$(stat -c '%a' "$root/$rel" 2>/dev/null || echo "?")" \
          "$(stat -c '%u' "$root/$rel" 2>/dev/null || echo "?")" \
          "$(stat -c '%g' "$root/$rel" 2>/dev/null || echo "?")"
      done
  # Symlinks: metadata plus the link target.
  (cd "$root" && find . -xdev -type l -print0 2>/dev/null) \
    | while IFS= read -r -d '' rel; do
        rel="${rel#./}"
        [[ -z "$rel" ]] && continue
        printf "L|%s|%s|%s|%s|%s\n" "$rel" \
          "$(stat -c '%a' "$root/$rel" 2>/dev/null || echo "?")" \
          "$(stat -c '%u' "$root/$rel" 2>/dev/null || echo "?")" \
          "$(stat -c '%g' "$root/$rel" 2>/dev/null || echo "?")" \
          "$(readlink "$root/$rel" 2>/dev/null || echo "?")"
      done
}
is_text_file() {
  # Heuristic "is this safe to diff as text?" check.
  # Prefers file(1) mime-type detection; falls back to grep -I (which treats
  # files containing NUL bytes as binary) when file(1) is unavailable.
  # Returns 0 for text, 1 for binary/missing.
  local candidate="$1"
  [[ -f "$candidate" ]] || return 1
  if command -v file >/dev/null 2>&1; then
    local mime
    mime="$(file -b --mime-type "$candidate" 2>/dev/null || echo "")"
    case "$mime" in
      text/*) return 0 ;;
      application/json|application/xml|application/x-sh|application/x-empty) return 0 ;;
      *) return 1 ;;
    esac
  fi
  grep -Iq . "$candidate" 2>/dev/null
}
safe_cat_head() {
  # Print at most $2 leading lines (default 200) of file $1.
  # Silent no-op (exit 0) when the file does not exist; never fails.
  local path="$1" limit="${2:-200}"
  [[ -f "$path" ]] || return 0
  head -n "$limit" "$path" 2>/dev/null || true
}
# Print the content-level delta for ONE file inside an extracted tgz pair.
# Arguments:
#   $1 label    - compare-item label, passed straight through to msg
#   $2 kind     - REMOVED | ADDED | MODIFIED
#   $3 relpath  - file path relative to the extracted archive roots
#   $4 olddir   - root of the extracted OLD archive
#   $5 newdir   - root of the extracted NEW archive
# Output goes through msg with (label, OLD/NEW, text) arguments — presumably
# rendered by a label-aware arm of msg not visible in this chunk; verify.
# Binary/unsupported files get a one-line note instead of a content dump.
tgz_print_file_content_delta() {
local label="$1"
local kind="$2" # REMOVED|ADDED|MODIFIED
local relpath="$3"
local olddir="$4"
local newdir="$5"
local oldf="${olddir}/${relpath}"
local newf="${newdir}/${relpath}"
# Cap on dumped/diffed lines per file (override via TGZ_MAX_LINES).
local MAX_LINES="${TGZ_MAX_LINES:-220}"
case "$kind" in
REMOVED)
# File only exists on the OLD side: dump its (truncated) content.
[[ -f "$oldf" ]] || return 0
if is_text_file "$oldf"; then
msg "$label" "OLD" "REMOVED FILE CONTENT: ${relpath}"
msg "$label" "OLD" "$(safe_cat_head "$oldf" "$MAX_LINES")"
else
msg "$label" "OLD" "REMOVED BINARY/UNSUPPORTED FILE: ${relpath} (content diff skipped)"
fi
;;
ADDED)
# File only exists on the NEW side: dump its (truncated) content.
[[ -f "$newf" ]] || return 0
if is_text_file "$newf"; then
msg "$label" "NEW" "ADDED FILE CONTENT: ${relpath}"
msg "$label" "NEW" "$(safe_cat_head "$newf" "$MAX_LINES")"
else
msg "$label" "NEW" "ADDED BINARY/UNSUPPORTED FILE: ${relpath} (content diff skipped)"
fi
;;
MODIFIED)
# File exists on both sides: show a compact changed-lines-only view.
[[ -f "$oldf" && -f "$newf" ]] || return 0
if is_text_file "$oldf" && is_text_file "$newf"; then
local tmpo tmpn diffraw oldchg newchg
tmpo="$(mktemp)"; tmpn="$(mktemp)"
# normalize_for_diff (defined elsewhere) canonicalizes noise before diffing.
normalize_for_diff < "$oldf" > "$tmpo" 2>/dev/null || true
normalize_for_diff < "$newf" > "$tmpn" 2>/dev/null || true
# -U0 emits changed lines only; the grep pair strips hunk file headers.
diffraw="$(
diff -U0 "$tmpo" "$tmpn" 2>/dev/null \
| grep -E '^[+-]' \
| grep -vE '^(---|\+\+\+)' \
|| true
)"
rm -f "$tmpo" "$tmpn"
# Split the raw diff into OLD (removals) and NEW (additions) columns.
oldchg="$(printf "%s\n" "$diffraw" | awk '/^-/ {sub(/^-/, "", $0); print}' | head -n "$MAX_LINES" | sed '/^$/d')"
newchg="$(printf "%s\n" "$diffraw" | awk '/^\+/ {sub(/^\+/, "", $0); print}' | head -n "$MAX_LINES" | sed '/^$/d')"
if [[ -n "${oldchg// }" || -n "${newchg// }" ]]; then
msg "$label" "OLD" "MODIFIED FILE CHANGES: ${relpath} (compact OLD vs NEW, truncated)"
[[ -n "${oldchg// }" ]] && msg "$label" "OLD" "$oldchg"
[[ -n "${newchg// }" ]] && msg "$label" "NEW" "$newchg"
fi
else
msg "$label" "OLD" "MODIFIED BINARY/UNSUPPORTED FILE: ${relpath} (content diff skipped)"
fi
;;
esac
}
# Deep-compare two .tgz snapshots ($2 OLD, $3 NEW) and report under label $1.
# Strategy:
#   1. Extract both into a throwaway mktemp dir and build hash_tree
#      inventories (type|path|mode|uid|gid|sha records).
#   2. If either inventory is empty (extraction failed), fall back to a
#      whole-archive sha256 comparison.
#   3. Otherwise diff the inventories: identical -> OK; else KO plus
#      ADDED/REMOVED/MODIFIED summaries and (optionally) per-file content
#      diffs, gated by TGZ_CONTENT_DIFF=1 or the --tgz-content flag.
compare_tgz_contents() {
local label="$1" oldtgz="$2" newtgz="$3"
if [[ ! -f "$oldtgz" || ! -f "$newtgz" ]]; then
msg "$label" "N/A"
return 0
fi
local tmpbase olddir newdir oldinv newinv
tmpbase="$(mktemp -d /tmp/tgzcmp_XXXXXX)"
olddir="${tmpbase}/old"
newdir="${tmpbase}/new"
oldinv="${tmpbase}/old.inv"
newinv="${tmpbase}/new.inv"
mkdir -p "$olddir" "$newdir"
extract_tgz_to "$oldtgz" "$olddir"
extract_tgz_to "$newtgz" "$newdir"
# sort -u gives stable, order-independent inventories for diff -q below.
hash_tree "$olddir" 2>/dev/null | sort -u > "$oldinv" || true
hash_tree "$newdir" 2>/dev/null | sort -u > "$newinv" || true
# Fallback: extraction produced nothing -> compare the archives as blobs.
if [[ ! -s "$oldinv" || ! -s "$newinv" ]]; then
local ocs ncs
ocs="$(sha256sum "$oldtgz" 2>/dev/null | awk '{print $1}' || echo "?")"
ncs="$(sha256sum "$newtgz" 2>/dev/null | awk '{print $1}' || echo "?")"
if [[ "$ocs" == "$ncs" ]]; then
msg "$label" "OK"
else
msg "$label" "KO"
msg "$label" "OLD" "$oldtgz (sha256=${ocs})"
msg "$label" "NEW" "$newtgz (sha256=${ncs})"
fi
rm -rf "$tmpbase"
return 0
fi
if diff -q "$oldinv" "$newinv" >/dev/null 2>&1; then
msg "$label" "OK"
rm -rf "$tmpbase"
return 0
fi
msg "$label" "KO"
# Full outer join of the two inventories keyed on (type,path); emits
# __REM__/__MOD__/__ADD__ marker lines, capped at 900 entries.
local delta
delta="$(awk -F'|' '
function key(t,p){ return t "|" p }
FNR==NR{ k=key($1,$2); old[k]=$0; next }
{ k=key($1,$2); new[k]=$0 }
END{
for(k in old){
if(!(k in new)){
split(k,a,"|"); printf "__REM__|%s|%s|%s\n", a[1], a[2], old[k]
} else if(old[k] != new[k]){
split(k,a,"|"); printf "__MOD__|%s|%s|%s|%s\n", a[1], a[2], old[k], new[k]
}
}
for(k in new){
if(!(k in old)){
split(k,a,"|"); printf "__ADD__|%s|%s|%s\n", a[1], a[2], new[k]
}
}
}
' "$oldinv" "$newinv" 2>/dev/null | sort | head -900 || true)"
# Human-readable summary blocks (each capped at 200 lines).
local removed_block added_block mod_old_block mod_new_block
removed_block="$(printf "%s\n" "$delta" | awk -F'|' '$1=="__REM__"{printf "REMOVED: %s %s\n", $2, $3}' | head -200 || true)"
added_block="$(printf "%s\n" "$delta" | awk -F'|' '$1=="__ADD__"{printf "ADDED : %s %s\n", $2, $3}' | head -200 || true)"
mod_old_block="$(printf "%s\n" "$delta" | awk -F'|' '
$1=="__MOD__"{ split($4,a,"|"); printf "MODIFIED: %s %s (old: mode=%s uid=%s gid=%s meta=%s)\n", $2, $3, a[3], a[4], a[5], a[6] }' | head -200 || true)"
mod_new_block="$(printf "%s\n" "$delta" | awk -F'|' '
$1=="__MOD__"{ split($5,a,"|"); printf "MODIFIED: %s %s (new: mode=%s uid=%s gid=%s meta=%s)\n", $2, $3, a[3], a[4], a[5], a[6] }' | head -200 || true)"
local old_out new_out
old_out="$(printf "%s\n%s\n" "$removed_block" "$mod_old_block" | sed '/^$/d' || true)"
new_out="$(printf "%s\n%s\n" "$added_block" "$mod_new_block" | sed '/^$/d' || true)"
[[ -n "${old_out// }" ]] && msg "$label" "OLD" "$old_out"
[[ -n "${new_out// }" ]] && msg "$label" "NEW" "$new_out"
# Default OFF; enable via env TGZ_CONTENT_DIFF=1 or flag --tgz-content
local CONTENT_DIFF="${TGZ_CONTENT_DIFF:-0}"
[[ "$FORCE_TGZ_CONTENT" == "1" ]] && CONTENT_DIFF=1
# Shared budget of content-diffed files across all three passes below.
local max_files="${TGZ_MAX_FILES:-30}"
local count=0
if [[ "$CONTENT_DIFF" == "1" ]]; then
# Only regular files (type F) get content dumps; dirs/symlinks are skipped.
while IFS='|' read -r tag typ path _rest; do
[[ "$tag" == "__REM__" && "$typ" == "F" ]] || continue
count=$((count+1)); (( count > max_files )) && break
tgz_print_file_content_delta "$label" "REMOVED" "$path" "$olddir" "$newdir"
done <<< "$delta"
while IFS='|' read -r tag typ path _rest; do
[[ "$tag" == "__ADD__" && "$typ" == "F" ]] || continue
count=$((count+1)); (( count > max_files )) && break
tgz_print_file_content_delta "$label" "ADDED" "$path" "$olddir" "$newdir"
done <<< "$delta"
while IFS='|' read -r tag typ path _oldline _newline; do
[[ "$tag" == "__MOD__" && "$typ" == "F" ]] || continue
count=$((count+1)); (( count > max_files )) && break
tgz_print_file_content_delta "$label" "MODIFIED" "$path" "$olddir" "$newdir"
done <<< "$delta"
fi
rm -rf "$tmpbase"
}
# Generic per-item comparison between two extracted snapshot roots.
# Arguments: $1 label (report name), $2 rel (file name inside each snapshot),
#            $3 oldroot, $4 newroot.
# Reports N/A when either side is missing, delegates *.tgz items to the deep
# tgz compare, otherwise normalizes both files (strip_headers +
# normalize_for_diff, defined elsewhere) and prints OK, or KO with the
# OLD-only and NEW-only lines.
compare_item() {
local label="${1:-}"
local rel="${2:-}"
local oldroot="${3:-}"
local newroot="${4:-}"
local oldf="${oldroot}/${rel}"
local newf="${newroot}/${rel}"
if [[ -z "$label" || -z "$rel" || -z "$oldroot" || -z "$newroot" ]]; then
msg -e "compare_item(): missing arguments (label=$label rel=$rel)."
return 1
fi
if [[ ! -e "$oldf" || ! -e "$newf" ]]; then
msg "$label" "N/A"
return 0
fi
# Archive-type items get the deep metadata/content comparison instead.
if [[ "$rel" == *.tgz ]]; then
compare_tgz_contents "$label" "$oldf" "$newf"
return 0
fi
local tmp_old tmp_new
tmp_old="$(mktemp)"
tmp_new="$(mktemp)"
strip_headers < "$oldf" | normalize_for_diff > "$tmp_old" || true
strip_headers < "$newf" | normalize_for_diff > "$tmp_new" || true
if diff -q "$tmp_old" "$tmp_new" >/dev/null 2>&1; then
msg "$label" "OK"
rm -f "$tmp_old" "$tmp_new"
return 0
fi
msg "$label" "KO"
local merged old_lines new_lines
# NOTE(review): the "|| true" makes this condition always succeed, so the
# else branch below is effectively dead; the empty-output fallback inside
# the then-branch covers the identical case instead.
if merged="$(diff_old_new_lines "$tmp_old" "$tmp_new" 2>/dev/null || true)"; then
# Split merged output at the ----SPLIT---- marker into OLD/NEW halves.
old_lines="$(printf "%s\n" "$merged" | awk 'BEGIN{p=1} $0=="----SPLIT----"{p=0;next} p==1{print}')"
new_lines="$(printf "%s\n" "$merged" | awk 'BEGIN{p=0} $0=="----SPLIT----"{p=1;next} p==1{print}')"
# Diff produced nothing usable: show the first 30 lines of each side.
if [[ -z "${old_lines// }" && -z "${new_lines// }" ]]; then
old_lines="$(head -30 "$tmp_old" || true)"
new_lines="$(head -30 "$tmp_new" || true)"
fi
[[ -n "${old_lines// }" ]] && msg "$label" "OLD" "$old_lines"
[[ -n "${new_lines// }" ]] && msg "$label" "NEW" "$new_lines"
else
msg "$label" "OLD" "$(head -30 "$tmp_old" || true)"
msg "$label" "NEW" "$(head -30 "$tmp_new" || true)"
fi
rm -f "$tmp_old" "$tmp_new"
}
# FS_mount stable compare (parse mount output "src on /mp type fstype (...)")
# Reduce a saved `mount` listing ($1) to a stable "src|fstype|mountpoint"
# list: pseudo/virtual filesystems and system mountpoints (/proc, /sys, ...)
# are dropped, output is normalized and sorted unique so reboot-to-reboot
# ordering noise does not produce false diffs. Silent no-op if $1 is missing.
mount_stable_list() {
local file="$1"
[[ -f "$file" ]] || return 0
strip_headers < "$file" \
| awk '
BEGIN{OFS="|"}
{
src=$1
if ($2!="on") next
mp=$3
if ($4!="type") next
fs=$5
if (src==""||mp==""||fs=="") next
if (fs ~ /^(proc|sysfs|tmpfs|devtmpfs|cgroup|cgroup2|overlay|securityfs|pstore|debugfs|tracefs|nsfs|autofs|mqueue|hugetlbfs|fusectl|bpf)$/) next
if (mp ~ /^\/(proc|sys|dev|run|var\/run|cgroup|cgroup2)/) next
print src,fs,mp
}
' \
| normalize_for_diff \
| sort -u
}
# Compare the stable mount lists (via mount_stable_list) of the FS_mount
# files in two extracted snapshot roots ($2 old, $3 new) under label $1.
# Prints N/A when a side is missing, OK when identical, else KO plus
# column-formatted OLD-only / NEW-only entries (capped at 150 lines each).
compare_mount_stable() {
local label="$1"
local oldroot="$2"
local newroot="$3"
local oldf="${oldroot}/FS_mount"
local newf="${newroot}/FS_mount"
if [[ ! -f "$oldf" || ! -f "$newf" ]]; then
msg "$label" "N/A"
return 0
fi
local tmpo tmpn
tmpo="$(mktemp)"; tmpn="$(mktemp)"
mount_stable_list "$oldf" > "$tmpo"
mount_stable_list "$newf" > "$tmpn"
if diff -q "$tmpo" "$tmpn" >/dev/null 2>&1; then
msg "$label" "OK"
rm -f "$tmpo" "$tmpn"
return 0
fi
msg "$label" "KO"
# Split diff output at ----SPLIT---- into OLD-only / NEW-only halves.
local merged old_lines new_lines
merged="$(diff_old_new_lines "$tmpo" "$tmpn" 2>/dev/null || true)"
old_lines="$(printf "%s\n" "$merged" | awk 'BEGIN{p=1} $0=="----SPLIT----"{p=0;next} p==1{print}')"
new_lines="$(printf "%s\n" "$merged" | awk 'BEGIN{p=0} $0=="----SPLIT----"{p=1;next} p==1{print}')"
# Re-expand "src|fs|mp" records into aligned columns for readability.
[[ -n "${old_lines// }" ]] && msg "$label" "OLD" "$(printf "%s\n" "$old_lines" | awk -F'|' '{printf "%-35s %-8s %s\n",$1,$2,$3}' | head -150)"
[[ -n "${new_lines// }" ]] && msg "$label" "NEW" "$(printf "%s\n" "$new_lines" | awk -F'|' '{printf "%-35s %-8s %s\n",$1,$2,$3}' | head -150)"
rm -f "$tmpo" "$tmpn"
}
fstab_mountpoints() {
  # Emit "mountpoint|fstype|options" for the persistent entries of an fstab
  # file ($1): comments, lines with fewer than 4 fields, "none" mountpoints
  # and swap/pseudo filesystems are all skipped. Silent no-op when $1 missing.
  local fstab="$1"
  [[ -f "$fstab" ]] || return 0
  awk '
  /^[[:space:]]*#/ {next}
  NF<4 {next}
  {
  mp=$2; fstype=$3; opts=$4
  if (mp=="" || mp=="none") next
  if (fstype ~ /^(swap|proc|sysfs|devpts|tmpfs|devtmpfs|cgroup|cgroup2|overlay)$/) next
  print mp "|" fstype "|" opts
  }
  ' "$fstab"
}
check_mount_order() {
  # Warn (via msg -w) when a child mountpoint appears in $2 before its parent
  # (e.g. /a/b listed before /a), which breaks sequential mounting.
  # Arguments: $1 label for messages; $2 newline-separated mountpoint list.
  # Returns 0 when ordering is sane, 1 when at least one warning was issued.
  local label="$1"
  local new_list="$2"
  local rc=0 line
  # FIX: the original piped awk INTO `while read`, so the loop body ran in a
  # subshell and "rc=1" never reached the function scope — the function
  # always returned 0 regardless of warnings. Process substitution keeps the
  # loop in the current shell so rc propagates.
  while IFS= read -r line; do
    [[ -z "$line" ]] && continue
    if [[ "$line" == __WARN__* ]]; then
      msg -w "${label}: ${line#__WARN__ }"
      rc=1
    fi
  done < <(awk '
  function parent(p) {
    sub(/\/+$/, "", p)
    if (p=="" || p=="/") return ""
    sub(/\/[^\/]+$/, "", p)
    if (p=="") p="/"
    return p
  }
  BEGIN { i=0 }
  {
    mp=$0
    if (mp=="") next
    a[i]=mp
    idx[mp]=i
    i++
  }
  END {
    # For every mountpoint, walk its ancestor chain; flag any ancestor that
    # is listed AFTER its descendant.
    for (j=0; j<i; j++) {
      mp=a[j]
      p=parent(mp)
      while (p!="") {
        if (p in idx) {
          if (idx[p] > idx[mp]) {
            printf("__WARN__ %s is mounted before %s\n", mp, p)
          }
        }
        if (p=="/") break
        p=parent(p)
      }
    }
  }
  ' <<< "$new_list")
  return $rc
}
# Semantic fstab comparison between two snapshot roots ($2 old, $3 new):
# checks parent-before-child mount ordering in the NEW fstab, warns about
# noauto entries, and flags mountpoints that were added or removed.
# Prints OK when nothing was flagged, otherwise KO plus both entry tables.
compare_fstab_smart() {
local label="$1"
local oldroot="$2"
local newroot="$3"
local oldf="${oldroot}/ETC_fstab"
local newf="${newroot}/ETC_fstab"
if [[ ! -f "$oldf" || ! -f "$newf" ]]; then
msg "$label" "N/A"
return 0
fi
local rc=0
# Entries are "mountpoint|fstype|options" records from fstab_mountpoints.
local new_entries old_entries
new_entries="$(fstab_mountpoints "$newf" || true)"
old_entries="$(fstab_mountpoints "$oldf" || true)"
local new_mps old_mps
new_mps="$(printf "%s\n" "$new_entries" | awk -F'|' '{print $1}' | sed '/^$/d' || true)"
old_mps="$(printf "%s\n" "$old_entries" | awk -F'|' '{print $1}' | sed '/^$/d' || true)"
if [[ -n "$new_mps" ]]; then
check_mount_order "$label" "$new_mps" || rc=1
fi
# Warn for entries that will not be mounted automatically at boot.
while IFS= read -r line; do
[[ -z "$line" ]] && continue
local mp opt
mp="$(awk -F'|' '{print $1}' <<< "$line")"
opt="$(awk -F'|' '{print $3}' <<< "$line")"
if echo "$opt" | tr ',' '\n' | grep -qx "noauto"; then
msg -w "${label}: FS is NOT mounted automatically (noauto): ${mp}"
rc=1
fi
done <<< "$new_entries"
# Set difference in both directions: added and removed mountpoints.
# NOTE(review): the unquoted $new_mps/$old_mps word-split, so mountpoints
# containing spaces would be mishandled — confirm whether that can occur.
local mp
for mp in $new_mps; do
echo "$old_mps" | grep -qx "$mp" || { msg -w "${label}: Filesystem NOT present before (fstab): $mp"; rc=1; }
done
for mp in $old_mps; do
echo "$new_mps" | grep -qx "$mp" || { msg -w "${label}: Filesystem NOT present now (fstab): $mp"; rc=1; }
done
if [[ $rc -eq 0 ]]; then
msg "$label" "OK"
else
msg "$label" "KO"
msg "$label" "OLD" "$(printf "%s\n" "$old_entries" | awk -F'|' '{printf "%-30s %-8s %s\n",$1,$2,$3}' | head -80)"
msg "$label" "NEW" "$(printf "%s\n" "$new_entries" | awk -F'|' '{printf "%-30s %-8s %s\n",$1,$2,$3}' | head -80)"
fi
}
parse_lsld_file() {
  # Convert a saved `ls -ld`-style listing ($1) into "path|perm|owner|group"
  # records. Comment lines, MISSING markers and lines with fewer than 9
  # fields are dropped. Silent no-op when the file does not exist.
  local listing="$1"
  [[ -f "$listing" ]] || return 0
  awk '
  /^#/ {next}
  $1 ~ /^MISSING/ {next}
  NF>=9 { print $9 "|" $1 "|" $3 "|" $4 }
  ' "$listing"
}
# Smart comparison of the FS_lsld listings (permissions/ownership of tracked
# paths) between two snapshot roots ($2 old, $3 new) under label $1.
# Warns per path for additions, removals and perm/owner/group changes, then
# prints a single OK/KO verdict.
compare_lsld_smart() {
local label="$1"
local oldroot="$2"
local newroot="$3"
local oldf="${oldroot}/FS_lsld"
local newf="${newroot}/FS_lsld"
if [[ ! -f "$oldf" || ! -f "$newf" ]]; then
msg "$label" "N/A"
return 0
fi
local rc=0
# Records are "path|perm|owner|group" from parse_lsld_file.
local old_data new_data
old_data="$(parse_lsld_file "$oldf" | normalize_for_diff || true)"
new_data="$(parse_lsld_file "$newf" | normalize_for_diff || true)"
# Union of paths seen on either side drives the per-path comparison loop.
local paths
paths="$(printf "%s\n%s\n" "$old_data" "$new_data" | awk -F'|' '{print $1}' | sort -u | sed '/^$/d')"
local p old_line new_line
for p in $paths; do
old_line="$(printf "%s\n" "$old_data" | awk -F'|' -v P="$p" '$1==P{print;exit}')"
new_line="$(printf "%s\n" "$new_data" | awk -F'|' -v P="$p" '$1==P{print;exit}')"
if [[ -z "$new_line" && -n "$old_line" ]]; then
msg -w "${label}: Removed filesystem/mountpoint: $p"
rc=1
continue
fi
if [[ -n "$new_line" && -z "$old_line" ]]; then
msg -w "${label}: New filesystem/mountpoint: $p"
rc=1
continue
fi
# Present on both sides: compare the three attribute fields individually.
local old_perm old_owner old_group new_perm new_owner new_group
old_perm="$(awk -F'|' '{print $2}' <<< "$old_line")"
old_owner="$(awk -F'|' '{print $3}' <<< "$old_line")"
old_group="$(awk -F'|' '{print $4}' <<< "$old_line")"
new_perm="$(awk -F'|' '{print $2}' <<< "$new_line")"
new_owner="$(awk -F'|' '{print $3}' <<< "$new_line")"
new_group="$(awk -F'|' '{print $4}' <<< "$new_line")"
[[ "$new_perm" == "$old_perm" ]] || { msg -w "${label}: FS permissions changed for '$p': Old: $old_perm / New: $new_perm"; rc=1; }
[[ "$new_owner" == "$old_owner" ]] || { msg -w "${label}: FS owner changed for '$p': Old: $old_owner / New: $new_owner"; rc=1; }
[[ "$new_group" == "$old_group" ]] || { msg -w "${label}: FS group changed for '$p': Old: $old_group / New: $new_group"; rc=1; }
done
if [[ $rc -eq 0 ]]; then
msg "$label" "OK"
else
msg "$label" "KO"
fi
}
exports_paths() {
  # Print the first whitespace-separated field (the exported path) of every
  # non-comment, non-empty line in an exports(5)-style file ($1).
  # Silent no-op when the file is missing.
  local src="$1"
  [[ -f "$src" ]] || return 0
  awk '!/^[[:space:]]*#/ && NF>=1 {print $1}' "$src" | sed '/^$/d'
}
# NFS exports comparison between snapshot roots ($2 old, $3 new) under
# label $1. Compares the exported PATH sets from ETC_exports, and — when
# captured on both sides — also the effective `exportfs -v` output (clients
# and options). Any difference yields a final KO, else OK.
compare_exports_smart() {
local label="$1"
local oldroot="$2"
local newroot="$3"
local old_exports="${oldroot}/ETC_exports"
local new_exports="${newroot}/ETC_exports"
local old_exportfs="${oldroot}/NFS_exportfs_v"
local new_exportfs="${newroot}/NFS_exportfs_v"
if [[ ! -f "$old_exports" || ! -f "$new_exports" ]]; then
msg "$label" "N/A"
return 0
fi
local rc=0
local old_paths new_paths
old_paths="$(exports_paths "$old_exports" || true)"
new_paths="$(exports_paths "$new_exports" || true)"
# Set difference in both directions: removed and added export paths.
local p
for p in $old_paths; do
echo "$new_paths" | grep -qx "$p" || { msg "$label" "OLD" "$p"; rc=1; }
done
for p in $new_paths; do
echo "$old_paths" | grep -qx "$p" || { msg "$label" "NEW" "$p"; rc=1; }
done
# Optional second pass: normalized diff of runtime exportfs -v output.
if [[ -f "$old_exportfs" && -f "$new_exportfs" ]]; then
local tmpo tmpn
tmpo="$(mktemp)"; tmpn="$(mktemp)"
strip_headers < "$old_exportfs" | normalize_for_diff > "$tmpo" || true
strip_headers < "$new_exportfs" | normalize_for_diff > "$tmpn" || true
if ! diff -q "$tmpo" "$tmpn" >/dev/null 2>&1; then
msg -w "${label}: exportfs -v output changed (clients/options may differ)"
local merged old_lines new_lines
if merged="$(diff_old_new_lines "$tmpo" "$tmpn" 2>/dev/null || true)"; then
old_lines="$(printf "%s\n" "$merged" | awk 'BEGIN{p=1} $0=="----SPLIT----"{p=0;next} p==1{print}')"
new_lines="$(printf "%s\n" "$merged" | awk 'BEGIN{p=0} $0=="----SPLIT----"{p=1;next} p==1{print}')"
[[ -n "${old_lines// }" ]] && msg "$label" "OLD" "$old_lines"
[[ -n "${new_lines// }" ]] && msg "$label" "NEW" "$new_lines"
fi
rc=1
fi
rm -f "$tmpo" "$tmpn"
fi
if [[ $rc -eq 0 ]]; then
msg "$label" "OK"
else
msg "$label" "KO"
fi
}
compare_timezone() {
  # Compare the first effective timezone line of SYS_timezone between the
  # two snapshot roots ($2 old, $3 new) under label $1.
  # Prints N/A (missing/empty), OK (same), or KO plus OLD/NEW values.
  local label="$1" oldroot="$2" newroot="$3"
  local old_file="${oldroot}/SYS_timezone"
  local new_file="${newroot}/SYS_timezone"
  if [[ ! -f "$old_file" || ! -f "$new_file" ]]; then
    msg "$label" "N/A"
    return 0
  fi
  local tz_before tz_after
  tz_before="$(strip_headers < "$old_file" | normalize_for_diff | head -1 || true)"
  tz_after="$(strip_headers < "$new_file" | normalize_for_diff | head -1 || true)"
  if [[ -z "$tz_before" || -z "$tz_after" ]]; then
    msg "$label" "N/A"
    return 0
  fi
  if [[ "$tz_before" == "$tz_after" ]]; then
    msg "$label" "OK"
    return 0
  fi
  msg "$label" "KO"
  msg "$label" "OLD" "$tz_before"
  msg "$label" "NEW" "$tz_after"
}
# -------- Compare run --------
# Top-level compare driver: extract NEW ($1) and OLD ($2) snapshot archives
# into a throwaway directory and run every per-item comparison, printing one
# OK/KO/N/A line per item (plus details on KO).
# Assumes each archive contains exactly one top-level directory — the first
# one found is taken as the snapshot root; verify against create_snapshot.
compare_two_archives_summary() {
local new_arc="$1" old_arc="$2"
if [[ ! -f "$new_arc" || ! -f "$old_arc" ]]; then
msg -e "Archive missing for compare."
return 1
fi
local tmpbase oldtmp newtmp
tmpbase="$(mktemp -d /tmp/vitals_cmp_XXXXXX)"
oldtmp="${tmpbase}/old"
newtmp="${tmpbase}/new"
mkdirp "$oldtmp" "$newtmp"
extract_archive "$old_arc" "$oldtmp"
extract_archive "$new_arc" "$newtmp"
local oldroot newroot
oldroot="$(find "$oldtmp" -mindepth 1 -maxdepth 1 -type d | head -1)"
newroot="$(find "$newtmp" -mindepth 1 -maxdepth 1 -type d | head -1)"
if [[ -z "$oldroot" || -z "$newroot" ]]; then
rm -rf "$tmpbase"
msg -e "Unable to read extracted snapshot directories."
return 1
fi
# OS/KERNEL/BOOT compares
compare_item "OS_redhat_release" "OS_redhat_release" "$oldroot" "$newroot"
compare_item "KERNEL_uname_r" "KERNEL_uname_r" "$oldroot" "$newroot"
compare_item "BOOT_grubby_default" "BOOT_grubby_default_kernel" "$oldroot" "$newroot"
# Core /etc configuration files (sudoers.d is a tgz -> deep compare).
compare_item "ETC_hosts" "ETC_hosts" "$oldroot" "$newroot"
compare_item "ETC_resolv" "ETC_resolv" "$oldroot" "$newroot"
compare_item "ETC_passwd" "ETC_passwd" "$oldroot" "$newroot"
compare_item "ETC_group" "ETC_group" "$oldroot" "$newroot"
compare_item "ETC_shadow" "ETC_shadow" "$oldroot" "$newroot"
compare_item "ETC_sudoers" "ETC_sudoers" "$oldroot" "$newroot"
compare_item "ETC_sudoers_d" "ETC_sudoers_d.tgz" "$oldroot" "$newroot"
# AUTOFS compares (no tgz, no manifest)
compare_item "AUTOFS_master" "AUTOFS_auto_master" "$oldroot" "$newroot"
compare_item "AUTOFS_maps" "AUTOFS_maps" "$oldroot" "$newroot"
compare_item "SEC_limits_conf" "SEC_limits_conf" "$oldroot" "$newroot"
compare_item "SEC_limits_d" "SEC_limits_d.tgz" "$oldroot" "$newroot"
compare_item "SSH_etc_ssh_config" "SSH_etc_ssh_config" "$oldroot" "$newroot"
compare_item "SSH_etc_sshd_config" "SSH_etc_sshd_config" "$oldroot" "$newroot"
compare_item "LDAP_etc_sssd_conf" "LDAP_etc_sssd_conf" "$oldroot" "$newroot"
compare_item "CRON_spool" "CRON_spool.tgz" "$oldroot" "$newroot"
compare_item "CRON_cron_d" "CRON_cron_d.tgz" "$oldroot" "$newroot"
# NFS/storage items: raw file compares plus the semantic helpers.
compare_item "ETC_exports" "ETC_exports" "$oldroot" "$newroot"
compare_item "NFS_exportfs_v" "NFS_exportfs_v" "$oldroot" "$newroot"
compare_exports_smart "NFS_exports" "$oldroot" "$newroot"
compare_fstab_smart "ETC_fstab" "$oldroot" "$newroot"
compare_mount_stable "FS_mount" "$oldroot" "$newroot"
compare_lsld_smart "FS_lsld" "$oldroot" "$newroot"
compare_item "alternatives_java" "alternatives_java" "$oldroot" "$newroot"
compare_item "alternatives_python" "alternatives_python" "$oldroot" "$newroot"
compare_item "LVM_PVS" "LVM_pvs_stable" "$oldroot" "$newroot"
compare_item "LVM_VGS" "LVM_vgs_stable" "$oldroot" "$newroot"
compare_item "LVM_LVS" "LVM_lvs_stable" "$oldroot" "$newroot"
compare_item "BOOT_files" "BOOT_files" "$oldroot" "$newroot"
compare_item "KERNEL_cmdline" "KERNEL_cmdline" "$oldroot" "$newroot"
compare_item "SELinux_getenforce" "SEC_getenforce" "$oldroot" "$newroot"
compare_timezone "TimeZone" "$oldroot" "$newroot"
compare_item "UMASK" "SYS_umask" "$oldroot" "$newroot"
# Services, scheduling, network and hardware inventory.
compare_item "SVC_systemctl_failed" "SVC_systemctl_failed" "$oldroot" "$newroot"
compare_item "SVC_systemctl_running" "SVC_systemctl_running" "$oldroot" "$newroot"
compare_item "SVC_systemctl_enabled" "SVC_systemctl_enabled" "$oldroot" "$newroot"
compare_item "SVC_chkconfig" "SVC_chkconfig" "$oldroot" "$newroot"
compare_item "CRON_root" "CRON_root" "$oldroot" "$newroot"
compare_item "NET_ifconfig" "NET_ifconfig" "$oldroot" "$newroot"
compare_item "NET_ip_addr" "NET_ip_addr" "$oldroot" "$newroot"
compare_item "NET_ip_route" "NET_ip_route" "$oldroot" "$newroot"
compare_item "NET_netstat_nr" "NET_netstat_nr" "$oldroot" "$newroot"
compare_item "HW_lscpu" "HW_lscpu" "$oldroot" "$newroot"
compare_item "HW_lsblk" "HW_lsblk" "$oldroot" "$newroot"
compare_item "HW_dmidecode_sys" "HW_dmidecode_sys" "$oldroot" "$newroot"
compare_item "HW_dmidecode_base" "HW_dmidecode_base" "$oldroot" "$newroot"
compare_item "HW_dmidecode_chas" "HW_dmidecode_chas" "$oldroot" "$newroot"
compare_item "HW_dmidecode_proc" "HW_dmidecode_proc" "$oldroot" "$newroot"
compare_item "HW_dmidecode_mem" "HW_dmidecode_mem" "$oldroot" "$newroot"
compare_item "PKG_list" "PKG_list" "$oldroot" "$newroot"
# sysctl compare (filtered important)
compare_item "SYS_sysctl_imp" "SYS_sysctl_imp" "$oldroot" "$newroot"
# Azure derived files (parsed from AZ_imds_pretty)
compare_item "AZ_Compute" "AZ_Compute" "$oldroot" "$newroot"
compare_item "AZ_Image_Reference" "AZ_Image_Reference" "$oldroot" "$newroot"
compare_item "AZ_Tags" "AZ_Tags" "$oldroot" "$newroot"
compare_item "AZ_Network" "AZ_Network" "$oldroot" "$newroot"
compare_item "AZ_Storage" "AZ_Storage" "$oldroot" "$newroot"
rm -rf "$tmpbase"
}
# -------- Actions --------
usage() {
# Print CLI help: primary (long-term flag) usage and the secondary
# change-workflow subcommands. Heredoc content is user-facing output.
cat <<EOF
Primary Usage : ./linux_healthcheck.sh [ -h | -i | -c | -C | -d | -g | -l | -q | -u | -v | --tgz-content ]
---------- : ------------------------------------------------------------
Options : Are designed for general long term usage.
-h : Print help information.
-i : Add entry in crontab to execute script periodically.
-c : Create a new snapshot and compare it to the previous one.
-C : Display all available snapshots and chose which 2 to compare.
-d : Display all available snapshots.
-g : Create a new snapshot.
-q : Compare quietly the latest 2 snapshots. Doesn't display differences.
-l : Create or compare snapshots in the current directory.
-u : Remove entry from crontab.
-v : Print version.
Secondary Usage : ./linux_healthcheck.sh [ start [ <change_number> ] | stop | compare | clean ]
---------- : ------------------------------------------------------------
Options : Are designed for usage during changes.
start : to use before implementing change to create initial snapshot.
: change_number : is necessary to activate the maintenance flag.
stop : creates a new snapshot and will compare it with the oldest archived snapshot.
compare : allows to compare any of the recorded snapshots.
clean : to use at the end of the change to clean all snapshots.
EOF
}
install_cron() {
  # Register a cron entry (CRON_SCHEDULE) that runs this script with -g,
  # tagged with CRON_TAG so uninstall_cron can find it. Pre-existing entries
  # for this script/tag are filtered out first, making the call idempotent.
  require_root_for_cron
  local script_path tmpf
  script_path="$(readlink -f "$0" 2>/dev/null || realpath "$0")"
  local line="${CRON_SCHEDULE} ${script_path} -g >/dev/null 2>&1"
  # FIX: use mktemp instead of the predictable /tmp/.cron.$$ name, which was
  # open to symlink/clobber races in the world-writable /tmp.
  tmpf="$(mktemp)" || { msg -e "mktemp failed; cannot update crontab."; return 1; }
  (crontab -l 2>/dev/null || true) \
    | grep -vF "$script_path" \
    | grep -vF "$CRON_TAG" \
    > "$tmpf" || true
  {
    cat "$tmpf" 2>/dev/null || true
    echo "$CRON_TAG"
    echo "$line"
  } | crontab -
  rm -f "$tmpf" || true
  msg -s "Crontab entry added."
}
uninstall_cron() {
  # Remove this script's scheduled entry and its CRON_TAG comment from the
  # crontab; all other entries are re-installed unchanged.
  require_root_for_cron
  local self_path
  self_path="$(readlink -f "$0" 2>/dev/null || realpath "$0")"
  (crontab -l 2>/dev/null || true) \
    | grep -vF "$CRON_TAG" \
    | grep -vF "$self_path" \
    | crontab -
  msg -s "Crontab entry removed."
}
compare_latest_two_quiet() {
  # Quiet -q mode: hash the extracted contents of the latest snapshot and its
  # predecessor (.1) and EXIT the script with 0 when identical, 2 when they
  # differ. Returns 1 (no exit) when fewer than two snapshots exist.
  local hn stem sum_new sum_old
  hn="$(host_short)"
  stem="${RUN_DIR}/vitals_${hn}.tgz"
  if [[ ! -f "$stem" || ! -f "${stem}.1" ]]; then
    msg -w "Not enough snapshots to compare quietly."
    return 1
  fi
  sum_new="$(tar -xOf "$stem" 2>/dev/null | sha256sum)"
  sum_old="$(tar -xOf "${stem}.1" 2>/dev/null | sha256sum)"
  if [[ "$sum_new" == "$sum_old" ]]; then
    exit 0
  fi
  exit 2
}
# Interactive -C / "compare" mode: show the snapshot listing, prompt for two
# indexes, and run the full comparison of the chosen pair.
# Indexes refer to the mtime-descending order printed by list_archives, so
# the same sort (newest first) is rebuilt here.
pick_two_and_compare() {
list_archives || return 1
echo
read -r -p "Enter NEW index: " nidx
read -r -p "Enter OLD index: " oidx
local host base
host="$(host_short)"
base="${RUN_DIR}/vitals_${host}.tgz"
local files=()
[[ -f "$base" ]] && files+=("$base")
while IFS= read -r f; do files+=("$f"); done < <(ls -1 "${base}."[0-9]* 2>/dev/null || true)
# Sort candidates newest-first by mtime to mirror the displayed indexes.
mapfile -t files < <(
printf "%s\n" "${files[@]}" \
| xargs -I{} bash -lc 'printf "%s\t%s\n" "$(stat -c %Y "{}")" "{}"' \
| sort -rn \
| awk -F'\t' '{print $2}'
)
# NOTE(review): nidx/oidx are not validated as numeric; a non-numeric entry
# triggers a bash array-subscript error here — confirm if that is acceptable.
local new_arc="${files[$nidx]:-}"
local old_arc="${files[$oidx]:-}"
if [[ -z "$new_arc" || -z "$old_arc" ]]; then
msg -e "Invalid selection indexes."
return 1
fi
local new_epoch old_epoch new_h old_h
new_epoch="$(stat -c %Y "$new_arc")"
old_epoch="$(stat -c %Y "$old_arc")"
new_h="$(date -d "@$new_epoch" '+%Y_%b_%d_%H:%M:%S')"
old_h="$(date -d "@$old_epoch" '+%Y_%b_%d_%H:%M:%S')"
msg -d "Processing : Comparing ${new_h} (NEW) with ${old_h} (OLD)."
compare_two_archives_summary "$new_arc" "$old_arc" || true
}
secondary_start() {
  # "start" subcommand: take a snapshot before a change, keep an extra
  # .START copy of it, and record the change number for later reference.
  local change_no="${1:-No_Change_Number}"
  msg -d "Start of change ${change_no} (Owner: $(id -un))"
  create_snapshot
  msg -s "Server vitals were gathered."
  list_archives || true
  local archive
  archive="${RUN_DIR}/vitals_$(host_short).tgz"
  cp -f "$archive" "${archive}.START" 2>/dev/null || true
  echo "$change_no" > "${RUN_DIR}/change_number.txt" 2>/dev/null || true
}
secondary_stop() {
  # "stop" subcommand: take a post-change snapshot, then compare the newest
  # archive against the oldest one (the pre-change baseline).
  local change_no="${1:-No_Change_Number}"
  msg -d "Stop of change ${change_no} (Owner: $(id -un))"
  create_snapshot
  msg -s "Server vitals were gathered."
  list_archives || true
  local newest oldest
  newest="$(get_newest_archive || true)"
  oldest="$(get_oldest_archive_in_dir || true)"
  if [[ -z "$newest" || -z "$oldest" ]]; then
    msg -e "Archive missing for compare."
    return 1
  fi
  if [[ "$newest" == "$oldest" ]]; then
    msg -w "Only one snapshot exists; cannot compare."
    return 1
  fi
  local stamp_new stamp_old
  stamp_new="$(date -d "@$(stat -c %Y "$newest")" '+%Y_%b_%d_%H:%M:%S')"
  stamp_old="$(date -d "@$(stat -c %Y "$oldest")" '+%Y_%b_%d_%H:%M:%S')"
  msg -d "Processing : Comparing ${stamp_new} (NEW) with ${stamp_old} (OLD)."
  compare_two_archives_summary "$newest" "$oldest" || true
}
secondary_clean() {
  # "clean" subcommand: delete the snapshot directory with hard guards —
  # only the expected /tmp/UNIX_Change path is ever removed.
  local target="${RUN_DIR_DEFAULT}"
  if [[ -z "$target" || "$target" == "/" || "$target" == "." ]]; then
    msg -e "Refusing to clean unsafe directory path: '$target'"
    exit 1
  fi
  if [[ "$target" != "/tmp/UNIX_Change" ]]; then
    msg -e "Refusing to clean unexpected path: '$target'"
    exit 1
  fi
  rm -rf "$target" 2>/dev/null || true
  msg -s "Cleaned snapshot directory: $target"
}
# Thin alias: the "compare" subcommand delegates to the interactive picker.
secondary_compare() { pick_two_and_compare; }
# -------- Main option parsing --------
use_current_dir=0
if [[ $# -eq 0 ]]; then
  usage
  exit 0
fi
# Short options via getopts; the long option --tgz-content arrives through the
# "-" pseudo-option. FIX: the optstring must declare "-:" (dash WITH colon) so
# getopts hands the long-option name to OPTARG; with a bare "-" the text after
# "--" was re-parsed as bogus bundled short options and "--tgz-content" was
# always rejected as unknown.
while getopts ":hicCdgqluv-:" opt; do
  case "$opt" in
    -)
      # Long options land here with the name (text after "--") in OPTARG.
      case "${OPTARG}" in
        tgz-content) FORCE_TGZ_CONTENT=1 ;;
        *) msg -e "Unknown option --${OPTARG}. Use -h for help."; exit 1 ;;
      esac
      ;;
    h) usage; exit 0 ;;
    v) echo "$VERSION"; exit 0 ;;
    l) RUN_DIR="$(pwd)"; use_current_dir=1 ;;  # operate in the current directory
    g) create_snapshot; exit 0 ;;
    d) list_archives; exit 0 ;;
    c)
      # Snapshot now, then compare against the immediately previous one (.1).
      create_snapshot
      host="$(host_short)"
      base="${RUN_DIR}/vitals_${host}.tgz"
      if [[ -f "$base" && -f "${base}.1" ]]; then
        new_epoch="$(stat -c %Y "$base")"
        old_epoch="$(stat -c %Y "${base}.1")"
        new_h="$(date -d "@$new_epoch" '+%Y_%b_%d_%H:%M:%S')"
        old_h="$(date -d "@$old_epoch" '+%Y_%b_%d_%H:%M:%S')"
        msg -d "Processing : Comparing ${new_h} (NEW) with ${old_h} (OLD)."
        compare_two_archives_summary "$base" "${base}.1" || true
      else
        msg -w "Not enough snapshots to compare."
      fi
      exit 0
      ;;
    C) pick_two_and_compare; exit 0 ;;
    q) compare_latest_two_quiet ;;  # exits 0 (identical) or 2 (differ) itself
    i) install_cron; exit 0 ;;
    u) uninstall_cron; exit 0 ;;
    \?) break ;;  # unknown short option: fall through to subcommand handling
  esac
done
shift $((OPTIND-1))
# -------- Secondary (change-workflow) subcommands --------
cmd="${1:-}"
case "$cmd" in
  start)
    RUN_DIR="$RUN_DIR_DEFAULT"
    shift || true
    secondary_start "${1:-No_Change_Number}"
    ;;
  stop)
    RUN_DIR="$RUN_DIR_DEFAULT"
    shift || true
    secondary_stop "${1:-No_Change_Number}"
    ;;
  compare)
    RUN_DIR="$RUN_DIR_DEFAULT"
    secondary_compare
    ;;
  clean)
    RUN_DIR="$RUN_DIR_DEFAULT"
    secondary_clean
    ;;
  *)
    msg -e "Unknown option/command. Use -h for help."
    exit 1
    ;;
esac