|
|
@@ -1,6 +1,12 @@
|
|
1
|
1
|
import asyncio
|
|
2
|
2
|
import logging
|
|
3
|
|
-from datetime import datetime, timezone
|
|
|
3
|
+import os
|
|
|
4
|
+import re
|
|
|
5
|
+import shlex
|
|
|
6
|
+import subprocess
|
|
|
7
|
+import uuid
|
|
|
8
|
+from collections import deque
|
|
|
9
|
+from datetime import datetime, timedelta, timezone
|
|
4
|
10
|
from pathlib import Path
|
|
5
|
11
|
|
|
6
|
12
|
from fastapi import Depends, FastAPI, File, HTTPException, Request, UploadFile
|
|
|
@@ -25,6 +31,7 @@ from app.models import (
|
|
25
|
31
|
IrisTicketCreateRequest,
|
|
26
|
32
|
LogLossCheckRequest,
|
|
27
|
33
|
LogLossStreamCheck,
|
|
|
34
|
+ SimLogRunRequest,
|
|
28
|
35
|
ShuffleLoginRequest,
|
|
29
|
36
|
ShuffleProxyRequest,
|
|
30
|
37
|
TriggerShuffleRequest,
|
|
|
@@ -40,6 +47,8 @@ app = FastAPI(title=settings.app_name, version="0.1.0")
|
|
40
|
47
|
logger = logging.getLogger(__name__)
|
|
41
|
48
|
UI_DIR = Path(__file__).resolve().parent / "ui"
|
|
42
|
49
|
UI_ASSETS_DIR = UI_DIR / "assets"
|
|
|
50
|
+SIM_SCRIPTS_DIR = Path("/app/scripts")
|
|
|
51
|
+SIM_RUN_LOGS_DIR = Path("/tmp/soc-integrator-sim-logs")
|
|
43
|
52
|
|
|
44
|
53
|
wazuh_adapter = WazuhAdapter(
|
|
45
|
54
|
base_url=settings.wazuh_base_url,
|
|
|
@@ -357,6 +366,11 @@ async def startup() -> None:
|
|
357
|
366
|
"last_result": None,
|
|
358
|
367
|
"last_ticket_ts_by_key": {},
|
|
359
|
368
|
}
|
|
|
369
|
+ app.state.systems_monitor_state = {
|
|
|
370
|
+ "last_ok_at": {},
|
|
|
371
|
+ }
|
|
|
372
|
+ app.state.sim_runs = {}
|
|
|
373
|
+ SIM_RUN_LOGS_DIR.mkdir(parents=True, exist_ok=True)
|
|
360
|
374
|
if settings.wazuh_auto_sync_enabled:
|
|
361
|
375
|
app.state.wazuh_auto_sync_task = asyncio.create_task(_wazuh_auto_sync_loop())
|
|
362
|
376
|
logger.info(
|
|
|
@@ -393,6 +407,11 @@ async def shutdown() -> None:
|
|
393
|
407
|
await ll_task
|
|
394
|
408
|
except asyncio.CancelledError:
|
|
395
|
409
|
pass
|
|
|
410
|
+ sim_runs = getattr(app.state, "sim_runs", {})
|
|
|
411
|
+ for run in sim_runs.values():
|
|
|
412
|
+ process = run.get("process")
|
|
|
413
|
+ if process and process.poll() is None:
|
|
|
414
|
+ process.terminate()
|
|
396
|
415
|
|
|
397
|
416
|
|
|
398
|
417
|
@app.get(
|
|
|
@@ -768,6 +787,266 @@ def _build_abuseipdb_ioc_result(
|
|
768
|
787
|
return result, matched, severity, confidence
|
|
769
|
788
|
|
|
770
|
789
|
|
|
|
790
|
+def _extract_first_array(payload: object) -> list[object]:
|
|
|
791
|
+ if isinstance(payload, list):
|
|
|
792
|
+ return payload
|
|
|
793
|
+ if not isinstance(payload, dict):
|
|
|
794
|
+ return []
|
|
|
795
|
+
|
|
|
796
|
+ preferred_keys = [
|
|
|
797
|
+ "items",
|
|
|
798
|
+ "results",
|
|
|
799
|
+ "workflows",
|
|
|
800
|
+ "apps",
|
|
|
801
|
+ "affected_items",
|
|
|
802
|
+ "data",
|
|
|
803
|
+ ]
|
|
|
804
|
+ for key in preferred_keys:
|
|
|
805
|
+ value = payload.get(key)
|
|
|
806
|
+ if isinstance(value, list):
|
|
|
807
|
+ return value
|
|
|
808
|
+
|
|
|
809
|
+ for value in payload.values():
|
|
|
810
|
+ extracted = _extract_first_array(value)
|
|
|
811
|
+ if extracted:
|
|
|
812
|
+ return extracted
|
|
|
813
|
+ return []
|
|
|
814
|
+
|
|
|
815
|
+
|
|
|
816
|
# Whitelist of runnable simulator scripts: public script key -> shell-script
# file name under SIM_SCRIPTS_DIR. The /sim/logs/* endpoints only accept
# these keys, which keeps arbitrary command execution off the table.
SIM_SCRIPT_MAP: dict[str, str] = {
    "fortigate": "send-wazuh-fortigate-test-events.sh",
    "endpoint": "send-wazuh-endpoint-agent-test-events.sh",
    "cisco": "send-wazuh-cisco-test-events.sh",
    "proposal_required": "send-wazuh-proposal-required-events.sh",
    "proposal_appendix_b": "send-wazuh-proposal-appendix-b-events.sh",
    "proposal_appendix_c": "send-wazuh-proposal-appendix-c-events.sh",
    "wazuh_test": "send-wazuh-test-events.sh",
}
|
|
|
825
|
+
|
|
|
826
|
+
|
|
|
827
|
def _build_sim_command(payload: SimLogRunRequest) -> list[str]:
    """Build the argv list for a whitelisted simulator script run.

    Count and delay are clamped to sane minimums before being passed as
    positional script arguments. The ``endpoint`` script takes an extra
    positional "scenario" argument; ``--forever`` is appended on request.
    """
    script_path = SIM_SCRIPTS_DIR / SIM_SCRIPT_MAP[payload.script]
    event_count = max(1, int(payload.count))
    event_delay = max(0.0, float(payload.delay_seconds))

    argv = ["/bin/bash", str(script_path), payload.target or "all"]
    if payload.script == "endpoint":
        # Only the endpoint-agent script understands a scenario selector.
        argv.append(payload.scenario or "all")
    argv.append(str(event_count))
    argv.append(str(event_delay))

    if payload.forever:
        argv.append("--forever")
    return argv
|
|
|
854
|
+
|
|
|
855
|
+
|
|
|
856
|
+def _serialize_sim_run(run_id: str, run: dict[str, object]) -> dict[str, object]:
|
|
|
857
|
+ process = run.get("process")
|
|
|
858
|
+ poll_code = process.poll() if process else None
|
|
|
859
|
+ return_code = run.get("return_code")
|
|
|
860
|
+ if poll_code is not None and return_code is None:
|
|
|
861
|
+ run["return_code"] = poll_code
|
|
|
862
|
+ return_code = poll_code
|
|
|
863
|
+
|
|
|
864
|
+ return {
|
|
|
865
|
+ "run_id": run_id,
|
|
|
866
|
+ "script": run.get("script"),
|
|
|
867
|
+ "target": run.get("target"),
|
|
|
868
|
+ "scenario": run.get("scenario"),
|
|
|
869
|
+ "count": run.get("count"),
|
|
|
870
|
+ "delay_seconds": run.get("delay_seconds"),
|
|
|
871
|
+ "forever": run.get("forever"),
|
|
|
872
|
+ "pid": run.get("pid"),
|
|
|
873
|
+ "cmd": run.get("cmd"),
|
|
|
874
|
+ "started_at": run.get("started_at"),
|
|
|
875
|
+ "stopped_at": run.get("stopped_at"),
|
|
|
876
|
+ "running": bool(process and process.poll() is None),
|
|
|
877
|
+ "return_code": return_code,
|
|
|
878
|
+ "log_file": run.get("log_file"),
|
|
|
879
|
+ }
|
|
|
880
|
+
|
|
|
881
|
+
|
|
|
882
|
+def _tail_log_lines(path: Path, limit: int = 200) -> list[str]:
|
|
|
883
|
+ line_limit = max(1, min(int(limit), 1000))
|
|
|
884
|
+ lines: deque[str] = deque(maxlen=line_limit)
|
|
|
885
|
+ try:
|
|
|
886
|
+ with path.open("r", encoding="utf-8", errors="replace") as handle:
|
|
|
887
|
+ for line in handle:
|
|
|
888
|
+ lines.append(line.rstrip("\n"))
|
|
|
889
|
+ except FileNotFoundError:
|
|
|
890
|
+ return []
|
|
|
891
|
+ return list(lines)
|
|
|
892
|
+
|
|
|
893
|
+
|
|
|
894
|
+def _safe_query_token(value: object) -> str | None:
|
|
|
895
|
+ text = str(value or "").strip()
|
|
|
896
|
+ if not text:
|
|
|
897
|
+ return None
|
|
|
898
|
+ if not re.fullmatch(r"[A-Za-z0-9_.:-]+", text):
|
|
|
899
|
+ return None
|
|
|
900
|
+ return text
|
|
|
901
|
+
|
|
|
902
|
+
|
|
|
903
|
+def _parse_iso_datetime(value: object) -> datetime | None:
|
|
|
904
|
+ text = str(value or "").strip()
|
|
|
905
|
+ if not text:
|
|
|
906
|
+ return None
|
|
|
907
|
+ if text.endswith("Z"):
|
|
|
908
|
+ text = text[:-1] + "+00:00"
|
|
|
909
|
+ try:
|
|
|
910
|
+ parsed = datetime.fromisoformat(text)
|
|
|
911
|
+ except ValueError:
|
|
|
912
|
+ return None
|
|
|
913
|
+ if parsed.tzinfo is None:
|
|
|
914
|
+ parsed = parsed.replace(tzinfo=timezone.utc)
|
|
|
915
|
+ return parsed.astimezone(timezone.utc)
|
|
|
916
|
+
|
|
|
917
|
+
|
|
|
918
|
def _sim_wazuh_query_clauses(run: dict[str, object]) -> list[str]:
    """Build Lucene-style query clauses correlating Wazuh alerts to a sim run.

    The list always starts with the ``soc_mvp_test`` marker clause that every
    simulator event carries; a script-specific clause and (optionally) a
    target-refinement clause are appended so callers can AND them together.

    Fix vs. previous version: the proposal/wazuh_test branch re-appended a
    clause identical to the base marker clause, and the unknown-script
    fallback appended a strict subset of it — both redundant duplicates are
    removed (query semantics are unchanged under either AND or OR joining).
    The unused ``scenario`` extraction was also dropped.
    """
    script = str(run.get("script") or "").strip().lower()
    target = str(run.get("target") or "all").strip()
    target_token = _safe_query_token(target)

    # Base clause: every simulator event is tagged with soc_mvp_test.
    clauses: list[str] = ["(full_log:*soc_mvp_test=true* OR data.soc_mvp_test:true)"]
    if script == "fortigate":
        clauses.append(
            "(full_log:*fortigate* OR full_log:*FGT80F* OR full_log:*FGT60F* OR full_log:*FGT40F* "
            "OR full_log:*FGT501E* OR data.vendor:fortinet OR data.product:fortigate OR data.source:fortigate)"
        )
        if target_token and target_token.lower() != "all":
            clauses.append(f"(full_log:*{target_token}* OR data.model:{target_token})")
    elif script == "endpoint":
        if target_token and target_token.lower() != "all":
            lowered = target_token.lower()
            if lowered in {"windows", "win"}:
                clauses.append(
                    "(full_log:*source=windows* OR full_log:*source=windows_agent* "
                    "OR data.source:windows OR data.source:windows_agent OR data.platform:windows)"
                )
            elif lowered in {"mac", "macos"}:
                clauses.append(
                    "(full_log:*source=mac* OR full_log:*source=mac_agent* "
                    "OR data.source:mac OR data.source:mac_agent OR data.platform:mac)"
                )
            elif lowered == "linux":
                clauses.append(
                    "(full_log:*source=linux* OR full_log:*source=linux_agent* "
                    "OR data.source:linux OR data.source:linux_agent OR data.platform:linux)"
                )
            else:
                # Unknown target: fall back to a plain substring match.
                clauses.append(f"full_log:*{target_token}*")
        else:
            # No specific target: match any of the three endpoint platforms.
            clauses.append(
                "(full_log:*source=windows* OR full_log:*source=windows_agent* "
                "OR full_log:*source=mac* OR full_log:*source=mac_agent* "
                "OR full_log:*source=linux* OR full_log:*source=linux_agent* "
                "OR data.source:windows OR data.source:windows_agent "
                "OR data.source:mac OR data.source:mac_agent "
                "OR data.source:linux OR data.source:linux_agent)"
            )
    elif script == "cisco":
        clauses.append("(full_log:*cisco* OR data.vendor:cisco)")
        if target_token and target_token.lower() != "all":
            clauses.append(f"full_log:*{target_token}*")
    elif script in {"proposal_required", "proposal_appendix_b", "proposal_appendix_c", "wazuh_test"}:
        # The marker clause is already the base clause; only add the optional
        # target refinement here.
        if target_token and target_token.lower() != "all":
            clauses.append(f"full_log:*{target_token}*")
    # Unknown scripts fall through with just the base marker clause.

    return clauses
|
|
|
974
|
+
|
|
|
975
|
+
|
|
|
976
|
+def _extract_wazuh_hits(payload: object) -> list[dict[str, object]]:
|
|
|
977
|
+ if not isinstance(payload, dict):
|
|
|
978
|
+ return []
|
|
|
979
|
+ hits_root = payload.get("hits")
|
|
|
980
|
+ if not isinstance(hits_root, dict):
|
|
|
981
|
+ return []
|
|
|
982
|
+ hits = hits_root.get("hits")
|
|
|
983
|
+ if not isinstance(hits, list):
|
|
|
984
|
+ return []
|
|
|
985
|
+ result: list[dict[str, object]] = []
|
|
|
986
|
+ for hit in hits:
|
|
|
987
|
+ if isinstance(hit, dict):
|
|
|
988
|
+ result.append(hit)
|
|
|
989
|
+ return result
|
|
|
990
|
+
|
|
|
991
|
+
|
|
|
992
|
+def _extract_wazuh_event_item(hit: dict[str, object], include_raw: bool) -> dict[str, object]:
|
|
|
993
|
+ source = hit.get("_source") if isinstance(hit.get("_source"), dict) else {}
|
|
|
994
|
+ source = source if isinstance(source, dict) else {}
|
|
|
995
|
+ agent = source.get("agent") if isinstance(source.get("agent"), dict) else {}
|
|
|
996
|
+ agent = agent if isinstance(agent, dict) else {}
|
|
|
997
|
+ decoder = source.get("decoder") if isinstance(source.get("decoder"), dict) else {}
|
|
|
998
|
+ decoder = decoder if isinstance(decoder, dict) else {}
|
|
|
999
|
+ data = source.get("data") if isinstance(source.get("data"), dict) else {}
|
|
|
1000
|
+ data = data if isinstance(data, dict) else {}
|
|
|
1001
|
+ rule = source.get("rule") if isinstance(source.get("rule"), dict) else {}
|
|
|
1002
|
+ rule = rule if isinstance(rule, dict) else {}
|
|
|
1003
|
+
|
|
|
1004
|
+ item: dict[str, object] = {
|
|
|
1005
|
+ "@timestamp": source.get("@timestamp") or source.get("timestamp"),
|
|
|
1006
|
+ "event_id": data.get("event_id") or source.get("id") or hit.get("_id"),
|
|
|
1007
|
+ "agent_name": agent.get("name"),
|
|
|
1008
|
+ "agent_id": agent.get("id"),
|
|
|
1009
|
+ "decoder_name": decoder.get("name"),
|
|
|
1010
|
+ "source": data.get("source"),
|
|
|
1011
|
+ "event_type": data.get("event_type"),
|
|
|
1012
|
+ "severity": data.get("severity"),
|
|
|
1013
|
+ "rule_id": rule.get("id"),
|
|
|
1014
|
+ "rule_description": rule.get("description"),
|
|
|
1015
|
+ "full_log": source.get("full_log"),
|
|
|
1016
|
+ }
|
|
|
1017
|
+ if include_raw:
|
|
|
1018
|
+ item["raw"] = source
|
|
|
1019
|
+ return item
|
|
|
1020
|
+
|
|
|
1021
|
+
|
|
|
1022
|
+def _extract_wazuh_rule_item(hit: dict[str, object], include_raw: bool) -> dict[str, object] | None:
|
|
|
1023
|
+ source = hit.get("_source") if isinstance(hit.get("_source"), dict) else {}
|
|
|
1024
|
+ source = source if isinstance(source, dict) else {}
|
|
|
1025
|
+ rule = source.get("rule") if isinstance(source.get("rule"), dict) else {}
|
|
|
1026
|
+ rule = rule if isinstance(rule, dict) else {}
|
|
|
1027
|
+ rule_id = rule.get("id")
|
|
|
1028
|
+ if rule_id in {None, ""}:
|
|
|
1029
|
+ return None
|
|
|
1030
|
+ agent = source.get("agent") if isinstance(source.get("agent"), dict) else {}
|
|
|
1031
|
+ agent = agent if isinstance(agent, dict) else {}
|
|
|
1032
|
+ data = source.get("data") if isinstance(source.get("data"), dict) else {}
|
|
|
1033
|
+ data = data if isinstance(data, dict) else {}
|
|
|
1034
|
+
|
|
|
1035
|
+ item: dict[str, object] = {
|
|
|
1036
|
+ "@timestamp": source.get("@timestamp") or source.get("timestamp"),
|
|
|
1037
|
+ "rule_id": rule_id,
|
|
|
1038
|
+ "rule_level": rule.get("level"),
|
|
|
1039
|
+ "rule_description": rule.get("description"),
|
|
|
1040
|
+ "rule_firedtimes": rule.get("firedtimes"),
|
|
|
1041
|
+ "event_id": data.get("event_id") or source.get("id") or hit.get("_id"),
|
|
|
1042
|
+ "agent_name": agent.get("name"),
|
|
|
1043
|
+ "full_log": source.get("full_log"),
|
|
|
1044
|
+ }
|
|
|
1045
|
+ if include_raw:
|
|
|
1046
|
+ item["raw"] = source
|
|
|
1047
|
+ return item
|
|
|
1048
|
+
|
|
|
1049
|
+
|
|
771
|
1050
|
@app.post(
|
|
772
|
1051
|
"/ioc/enrich",
|
|
773
|
1052
|
response_model=ApiResponse,
|
|
|
@@ -1104,6 +1383,17 @@ async def ioc_history(limit: int = 50, offset: int = 0) -> ApiResponse:
|
|
1104
|
1383
|
|
|
1105
|
1384
|
|
|
1106
|
1385
|
@app.get(
    "/geoip/{ip}",
    response_model=ApiResponse,
    summary="GeoIP lookup",
    description="Lookup geolocation for a public IP address using configured GeoIP provider.",
)
async def geoip_lookup(ip: str) -> ApiResponse:
    """Proxy a single-IP geolocation lookup to the configured GeoIP adapter.

    NOTE(review): unlike the /monitor and /sim endpoints this route has no
    ``require_internal_api_key`` dependency — confirm it is meant to be
    reachable without the internal API key.
    """
    result = await geoip_adapter.lookup(ip)
    return ApiResponse(data={"geoip": result})
|
|
|
1394
|
+
|
|
|
1395
|
+
|
|
|
1396
|
+@app.get(
|
|
1107
|
1397
|
"/sync/wazuh-version",
|
|
1108
|
1398
|
response_model=ApiResponse,
|
|
1109
|
1399
|
summary="Wazuh version",
|
|
|
@@ -1249,6 +1539,402 @@ async def wazuh_auto_sync_status() -> ApiResponse:
|
|
1249
|
1539
|
)
|
|
1250
|
1540
|
|
|
1251
|
1541
|
|
|
|
1542
|
@app.get(
    "/monitor/systems",
    response_model=ApiResponse,
    dependencies=[Depends(require_internal_api_key)],
    summary="Systems monitor overview",
    description="Unified monitoring snapshot for Wazuh, Shuffle, IRIS, and PagerDuty with pipeline KPIs and recent records.",
)
async def monitor_systems(
    minutes: int = 60,
    limit: int = 20,
    include_raw: bool = False,
) -> ApiResponse:
    """Build a unified monitoring snapshot across all integrated systems.

    Combines (a) dependency health probes, (b) KPI counters persisted in the
    local database over the last *minutes*, and (c) best-effort "recent rows"
    fetched live from each upstream. Upstream fetch failures never fail the
    endpoint — they are folded into the card's ``last_error`` and may mark a
    healthy dependency as ``degraded``. Also updates the per-process
    ``app.state.systems_monitor_state["last_ok_at"]`` bookkeeping.
    """
    window_minutes = max(1, minutes)
    row_limit = max(1, limit)
    now = datetime.now(timezone.utc)
    since = now - timedelta(minutes=window_minutes)
    now_iso = now.isoformat()

    dependencies = await mvp_service.dependency_health()
    # Fall back to a fresh state dict if startup() has not populated one.
    monitor_state = getattr(app.state, "systems_monitor_state", {"last_ok_at": {}})
    last_ok_at_by_key = monitor_state.setdefault("last_ok_at", {})

    # KPI counters from persisted database records in the selected lookback window.
    alerts_ingested = repo.count_incident_events_since(since=since, source="wazuh")
    detections_matched = repo.count_c_detection_events_since(since=since)
    iris_tickets_created = repo.count_incidents_with_iris_since(since=since)
    pagerduty_escalations_sent = repo.count_escalations_since(since=since, success=True)
    pagerduty_escalations_failed = repo.count_escalations_since(since=since, success=False)

    # Best-effort live fetches: each failure is captured as a string and
    # surfaced on the corresponding card instead of failing the request.
    wazuh_recent: list[object] = []
    wazuh_recent_error: str | None = None
    try:
        wazuh_resp = await wazuh_adapter.list_manager_logs(limit=row_limit, offset=0, q=None, sort=None)
        wazuh_recent = _extract_first_array(wazuh_resp)[:row_limit]
    except Exception as exc:
        wazuh_recent_error = str(exc)

    shuffle_recent: list[object] = []
    shuffle_recent_error: str | None = None
    try:
        workflows_resp = await shuffle_adapter.list_workflows()
        workflows = _extract_first_array(workflows_resp)
        for item in workflows[:row_limit]:
            if isinstance(item, dict):
                # Trim workflow records to the fields the UI cards show.
                shuffle_recent.append(
                    {
                        "id": item.get("id") or item.get("workflow_id"),
                        "name": item.get("name") or item.get("workflow", {}).get("name"),
                        "status": item.get("status"),
                    }
                )
            else:
                shuffle_recent.append(item)
    except Exception as exc:
        shuffle_recent_error = str(exc)

    iris_recent: list[object] = []
    iris_recent_error: str | None = None
    try:
        iris_resp = await iris_adapter.list_cases(limit=row_limit, offset=0)
        iris_recent = _extract_first_array(iris_resp)[:row_limit]
    except Exception as exc:
        iris_recent_error = str(exc)

    # PagerDuty "recent" comes from our own escalation records, not the API.
    pagerduty_recent = repo.list_recent_escalations(limit=row_limit)

    def build_card(
        label: str,
        dependency_key: str,
        recent: list[object],
        kpis: dict[str, object],
        extra_error: str | None = None,
    ) -> dict[str, object]:
        # Assemble one per-system status card; status is "ok"/"down" from the
        # health probe, downgraded to "degraded" when the probe was up but the
        # live fetch above failed.
        dep = dependencies.get(dependency_key, {})
        dep_status = str(dep.get("status") or "down")
        status = "ok" if dep_status == "up" else "down"

        if dep_status == "up":
            last_ok_at_by_key[label] = now_iso

        error_parts: list[str] = []
        if dep.get("error"):
            error_parts.append(str(dep.get("error")))
        if extra_error:
            error_parts.append(extra_error)

        if dep_status == "up" and extra_error:
            status = "degraded"

        card: dict[str, object] = {
            "status": status,
            "latency_ms": dep.get("latency_ms"),
            "last_ok_at": last_ok_at_by_key.get(label),
            "last_error": " | ".join(error_parts) if error_parts else None,
            "kpis": kpis,
            "recent": recent,
        }
        if include_raw:
            card["raw"] = dep.get("details")
        return card

    cards = {
        "wazuh": build_card(
            label="wazuh",
            dependency_key="wazuh",
            recent=wazuh_recent,
            extra_error=wazuh_recent_error,
            kpis={
                "alerts_ingested": alerts_ingested,
                "recent_rows": len(wazuh_recent),
            },
        ),
        "shuffle": build_card(
            label="shuffle",
            dependency_key="shuffle",
            recent=shuffle_recent,
            extra_error=shuffle_recent_error,
            kpis={
                "recent_workflows": len(shuffle_recent),
            },
        ),
        "iris": build_card(
            label="iris",
            dependency_key="iris",
            recent=iris_recent,
            extra_error=iris_recent_error,
            kpis={
                "tickets_created": iris_tickets_created,
                "recent_rows": len(iris_recent),
            },
        ),
        "pagerduty": build_card(
            label="pagerduty",
            dependency_key="pagerduty_stub",
            recent=pagerduty_recent,
            kpis={
                "escalations_sent": pagerduty_escalations_sent,
                "escalations_failed": pagerduty_escalations_failed,
            },
        ),
    }

    # Persist last_ok_at bookkeeping back onto app.state.
    app.state.systems_monitor_state = monitor_state

    return ApiResponse(
        data={
            "generated_at": now_iso,
            "window_minutes": window_minutes,
            "cards": cards,
            "pipeline": {
                "alerts_ingested": alerts_ingested,
                "detections_matched": detections_matched,
                "iris_tickets_created": iris_tickets_created,
                "pagerduty_escalations_sent": pagerduty_escalations_sent,
                "pagerduty_escalations_failed": pagerduty_escalations_failed,
            },
        }
    )
|
|
|
1700
|
+
|
|
|
1701
|
+
|
|
|
1702
|
@app.get(
    "/sim/logs/runs",
    response_model=ApiResponse,
    dependencies=[Depends(require_internal_api_key)],
    summary="List simulator runs",
    description="List active and recent simulator script runs started from soc-integrator.",
)
async def sim_logs_runs() -> ApiResponse:
    """List all known simulator runs, newest first.

    Side effect: a run whose process has exited but has no recorded
    ``stopped_at`` yet gets one stamped now (lazy finalization — there is
    no background reaper).
    """
    sim_runs: dict[str, dict[str, object]] = getattr(app.state, "sim_runs", {})
    items: list[dict[str, object]] = []
    for run_id, run in sim_runs.items():
        serialized = _serialize_sim_run(run_id, run)
        if (not serialized["running"]) and not run.get("stopped_at"):
            # First observation of a finished run: record when we noticed.
            run["stopped_at"] = datetime.now(timezone.utc).isoformat()
            serialized["stopped_at"] = run["stopped_at"]
        items.append(serialized)
    # Newest runs first; ISO-8601 strings sort chronologically.
    items.sort(key=lambda x: str(x.get("started_at") or ""), reverse=True)
    return ApiResponse(data={"items": items})
|
|
|
1720
|
+
|
|
|
1721
|
+
|
|
|
1722
|
@app.post(
    "/sim/logs/start",
    response_model=ApiResponse,
    dependencies=[Depends(require_internal_api_key)],
    summary="Start simulator logs script",
    description="Start a whitelisted simulator script in background and return run metadata.",
)
async def sim_logs_start(payload: SimLogRunRequest) -> ApiResponse:
    """Launch a whitelisted simulator script as a detached background process.

    The script's combined stdout/stderr is appended to a per-run log file
    under SIM_RUN_LOGS_DIR; run metadata (including the live Popen handle)
    is kept in ``app.state.sim_runs`` keyed by a fresh UUID.

    Raises 400 when the mapped script file is missing in the container and
    502 when spawning the process fails.
    """
    # payload.script is validated against SIM_SCRIPT_MAP by the request model,
    # so this lookup cannot KeyError for accepted payloads.
    script_name = SIM_SCRIPT_MAP[payload.script]
    script_path = SIM_SCRIPTS_DIR / script_name
    if not script_path.exists():
        raise HTTPException(status_code=400, detail=f"Simulator script not found in container: {script_name}")

    cmd = _build_sim_command(payload)
    # Inherit the container environment but guarantee syslog defaults.
    env = dict(os.environ)
    env.setdefault("WAZUH_SYSLOG_HOST", "wazuh.manager")
    env.setdefault("WAZUH_SYSLOG_PORT", "514")
    run_id = str(uuid.uuid4())
    log_file = SIM_RUN_LOGS_DIR / f"{run_id}.log"
    log_handle = None

    try:
        log_handle = log_file.open("ab")
        process = subprocess.Popen(
            cmd,
            cwd=str(SIM_SCRIPTS_DIR),
            env=env,
            stdout=log_handle,
            stderr=subprocess.STDOUT,
            # Detach into its own session so it is not tied to our signals.
            start_new_session=True,
        )
    except Exception as exc:
        if log_handle:
            try:
                log_handle.close()
            except Exception:
                pass
        raise HTTPException(status_code=502, detail=f"Failed to start simulator: {exc}") from exc
    finally:
        # Closing the parent's handle is safe after a successful Popen: the
        # child keeps its own duplicated file descriptor. (On the failure
        # path this is a harmless second close — file.close() is idempotent.)
        if log_handle:
            log_handle.close()
    sim_runs: dict[str, dict[str, object]] = getattr(app.state, "sim_runs", {})
    sim_runs[run_id] = {
        "script": payload.script,
        "target": payload.target,
        "scenario": payload.scenario,
        "count": payload.count,
        "delay_seconds": payload.delay_seconds,
        "forever": payload.forever,
        "pid": process.pid,
        # Shell-quoted for display/debugging only; never re-executed.
        "cmd": " ".join(shlex.quote(part) for part in cmd),
        "started_at": datetime.now(timezone.utc).isoformat(),
        "stopped_at": None,
        "return_code": None,
        "log_file": str(log_file),
        "process": process,
    }
    app.state.sim_runs = sim_runs
    return ApiResponse(data={"run": _serialize_sim_run(run_id, sim_runs[run_id])})
|
|
|
1781
|
+
|
|
|
1782
|
+
|
|
|
1783
|
@app.post(
    "/sim/logs/stop/{run_id}",
    response_model=ApiResponse,
    dependencies=[Depends(require_internal_api_key)],
    summary="Stop simulator run",
    description="Stop a running simulator script by run_id.",
)
async def sim_logs_stop(run_id: str) -> ApiResponse:
    """Stop one simulator run: SIGTERM, then SIGKILL after a 3s grace period.

    Idempotent — stopping an already-finished run just refreshes its
    ``stopped_at`` stamp. Raises 404 for unknown run ids and 502 when the
    process cannot be stopped.

    NOTE(review): the script was started with start_new_session=True, so
    terminate()/kill() target only the script process itself, not any
    children it may have spawned — confirm the scripts do not fork.
    """
    sim_runs: dict[str, dict[str, object]] = getattr(app.state, "sim_runs", {})
    run = sim_runs.get(run_id)
    if not run:
        raise HTTPException(status_code=404, detail=f"Run not found: {run_id}")

    process = run.get("process")
    if process and process.poll() is None:
        try:
            process.terminate()
            process.wait(timeout=3)
        except subprocess.TimeoutExpired:
            # Graceful stop timed out; force-kill.
            process.kill()
        except Exception as exc:
            raise HTTPException(status_code=502, detail=f"Failed to stop run: {exc}") from exc

    run["stopped_at"] = datetime.now(timezone.utc).isoformat()
    return ApiResponse(data={"run": _serialize_sim_run(run_id, run)})
|
|
|
1808
|
+
|
|
|
1809
|
+
|
|
|
1810
|
@app.post(
    "/sim/logs/stop-running",
    response_model=ApiResponse,
    dependencies=[Depends(require_internal_api_key)],
    summary="Stop all running simulator runs",
    description="Stop all currently running simulator scripts (including forever mode).",
)
async def sim_logs_stop_running() -> ApiResponse:
    """Stop every still-running simulator run and report what was stopped.

    Same SIGTERM-then-SIGKILL discipline as the single-run stop endpoint.
    A failure to stop one run aborts the sweep with a 502; runs processed
    before the failure keep their recorded ``stopped_at``.
    """
    sim_runs: dict[str, dict[str, object]] = getattr(app.state, "sim_runs", {})
    stopped: list[dict[str, object]] = []
    already_stopped = 0

    for run_id, run in sim_runs.items():
        process = run.get("process")
        if process and process.poll() is None:
            try:
                process.terminate()
                process.wait(timeout=3)
            except subprocess.TimeoutExpired:
                # Graceful stop timed out; force-kill this run.
                process.kill()
            except Exception as exc:
                raise HTTPException(status_code=502, detail=f"Failed to stop run {run_id}: {exc}") from exc
            run["stopped_at"] = datetime.now(timezone.utc).isoformat()
            stopped.append(_serialize_sim_run(run_id, run))
        else:
            already_stopped += 1

    return ApiResponse(
        data={
            "stopped_count": len(stopped),
            "already_stopped_count": already_stopped,
            "runs": stopped,
        }
    )
|
|
|
1844
|
+
|
|
|
1845
|
+
|
|
|
1846
|
@app.get(
    "/sim/logs/output/{run_id}",
    response_model=ApiResponse,
    dependencies=[Depends(require_internal_api_key)],
    summary="Get simulator run output",
    description="Return tailed output lines from simulator run log file.",
)
async def sim_logs_output(run_id: str, limit: int = 200) -> ApiResponse:
    """Return the last *limit* lines of a run's captured stdout/stderr.

    ``limit`` is clamped to 1..1000 by the tail helper. Raises 404 for an
    unknown run id or a run with no recorded log file; a log file that was
    recorded but has since disappeared yields zero lines, not an error.
    """
    sim_runs: dict[str, dict[str, object]] = getattr(app.state, "sim_runs", {})
    run = sim_runs.get(run_id)
    if not run:
        raise HTTPException(status_code=404, detail=f"Run not found: {run_id}")

    log_file_path = run.get("log_file")
    if not log_file_path:
        raise HTTPException(status_code=404, detail=f"No log file for run: {run_id}")

    log_file = Path(str(log_file_path))
    lines = _tail_log_lines(log_file, limit=limit)
    process = run.get("process")
    running = bool(process and process.poll() is None)
    return ApiResponse(
        data={
            "run_id": run_id,
            "running": running,
            "line_count": len(lines),
            "lines": lines,
            # Pre-joined convenience field for plain-text display.
            "text": "\n".join(lines),
            "log_file": str(log_file),
        }
    )
|
|
|
1877
|
+
|
|
|
1878
|
+
|
|
|
1879
|
@app.get(
    "/sim/logs/wazuh-latest/{run_id}",
    response_model=ApiResponse,
    dependencies=[Depends(require_internal_api_key)],
    summary="Get latest Wazuh logs/rules for simulator run",
    description="Return latest Wazuh event logs and matched rules correlated to a simulator run.",
)
async def sim_logs_wazuh_latest(
    run_id: str,
    limit: int = 50,
    minutes: int = 15,
    include_raw: bool = False,
) -> ApiResponse:
    """Fetch the latest Wazuh alerts alongside a simulator run's metadata.

    NOTE(review): despite the route name, the search is deliberately
    UNFILTERED ("*") with a lookback of at least 24h (see inline comment) —
    it emulates the Discover "latest records" view rather than querying the
    run-specific clauses from _sim_wazuh_query_clauses(). Confirm this is
    the intended behavior; the run_id only selects which run's metadata is
    echoed back. Raises 404 for unknown runs and 502 on search failure.
    """
    sim_runs: dict[str, dict[str, object]] = getattr(app.state, "sim_runs", {})
    run = sim_runs.get(run_id)
    if not run:
        raise HTTPException(status_code=404, detail=f"Run not found: {run_id}")

    requested_minutes = max(1, int(minutes))
    # Keep query unfiltered and use a wide lookback to emulate Discover "latest records".
    effective_minutes = max(1440, requested_minutes)
    query_limit = max(1, min(int(limit), 200))
    query_text = "*"

    try:
        raw = await wazuh_adapter.search_alerts(
            query=query_text,
            limit=query_limit,
            minutes=effective_minutes,
        )
    except Exception as exc:
        raise HTTPException(status_code=502, detail=f"Wazuh search failed: {exc}") from exc

    hits = _extract_wazuh_hits(raw)
    events = [_extract_wazuh_event_item(hit, include_raw=include_raw) for hit in hits]
    # Rule summaries are only emitted for hits that actually matched a rule.
    rules: list[dict[str, object]] = []
    for hit in hits:
        rule_item = _extract_wazuh_rule_item(hit, include_raw=include_raw)
        if rule_item:
            rules.append(rule_item)

    return ApiResponse(
        data={
            "run": _serialize_sim_run(run_id, run),
            "query": {
                "effective_minutes": effective_minutes,
                "text": query_text,
                "limit": query_limit,
            },
            "events": events,
            "rules": rules,
            "totals": {
                "events": len(events),
                "rules": len(rules),
            },
        }
    )
|
|
|
1936
|
+
|
|
|
1937
|
+
|
|
1252
|
1938
|
@app.post(
|
|
1253
|
1939
|
"/monitor/log-loss/check",
|
|
1254
|
1940
|
response_model=ApiResponse,
|