Add dev stacks
stacks/network-mcp/collectors/common/__init__.py (new file, 0 lines)
stacks/network-mcp/collectors/common/es_auth.py (new file, 55 lines)
@@ -0,0 +1,55 @@
import base64
from typing import Optional, Tuple


def _clean(value: Optional[str]) -> str:
    """
    Normalize values coming from env files where quotes might be preserved.
    """
    if not value:
        return ""
    return value.strip().strip('"').strip()


def resolve_api_key(api_id: Optional[str], api_key: Optional[str]) -> Tuple[Optional[str], Optional[str]]:
    """
    Accept various API key formats and return (api_id, api_key).

    Supported formats:
    - Explicit ES_API_ID and ES_API_KEY values.
    - ES_API_KEY that already contains "id:key".
    - ES_API_KEY that is the base64 encoding of "id:key".
    """
    cleaned_id = _clean(api_id)
    cleaned_key = _clean(api_key)

    if cleaned_id and cleaned_key:
        return cleaned_id, cleaned_key

    if not cleaned_key:
        return None, None

    # Raw "id:key" format
    if ":" in cleaned_key:
        potential_id, potential_key = cleaned_key.split(":", 1)
        if potential_id and potential_key:
            return potential_id, potential_key

    # Base64-encoded "id:key" format
    try:
        decoded = base64.b64decode(cleaned_key, validate=True).decode()
        if ":" in decoded:
            potential_id, potential_key = decoded.split(":", 1)
            if potential_id and potential_key:
                return potential_id, potential_key
    except Exception:
        pass

    return None, None


def build_api_key_header(api_id: str, api_key: str) -> str:
    """
    Return the value for the Authorization header using ApiKey auth.
    """
    token = base64.b64encode(f"{api_id}:{api_key}".encode()).decode()
    return f"ApiKey {token}"
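
For reference, the three accepted formats behave like this. A minimal sketch: the id/key pair is made up, and the import path assumes the collectors run with this common package importable (as the sibling modules below do with relative imports):

from common.es_auth import resolve_api_key, build_api_key_header
import base64

# Explicit id and key are passed through unchanged
assert resolve_api_key("my-id", "my-secret") == ("my-id", "my-secret")

# Combined "id:key" supplied via ES_API_KEY alone
assert resolve_api_key(None, "my-id:my-secret") == ("my-id", "my-secret")

# Base64 encoding of "id:key" (what Kibana shows when creating an API key)
encoded = base64.b64encode(b"my-id:my-secret").decode()
assert resolve_api_key(None, encoded) == ("my-id", "my-secret")

# Header value for raw HTTP clients
print(build_api_key_header("my-id", "my-secret"))  # ApiKey <base64 of "my-id:my-secret">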
stacks/network-mcp/collectors/common/es_client.py (new file, 85 lines)
@@ -0,0 +1,85 @@
import os
import urllib3
from elasticsearch import Elasticsearch, helpers
from .es_auth import resolve_api_key
from .logging_config import setup_logging

# Suppress insecure request warnings if SSL verification is disabled
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

logger = setup_logging("es_client")


class ESClient:
    def __init__(self):
        self.url = os.getenv("ES_URL", "http://localhost:9200")
        env_api_id = os.getenv("ES_API_ID")
        env_api_key = os.getenv("ES_API_KEY")
        self.api_id, self.api_key = resolve_api_key(env_api_id, env_api_key)
        self.user = os.getenv("ES_USER", "elastic")
        self.password = os.getenv("ES_PASS", "changeme")
        self.verify_ssl = os.getenv("ES_VERIFY_SSL", "true").lower() == "true"

        if self.api_id and self.api_key:
            # Use API key authentication
            self.client = Elasticsearch(
                self.url,
                api_key=(self.api_id, self.api_key),
                verify_certs=self.verify_ssl,
                ssl_show_warn=False
            )
            logger.info("Using Elasticsearch API key authentication.")
        else:
            # Fall back to basic auth
            self.client = Elasticsearch(
                self.url,
                basic_auth=(self.user, self.password),
                verify_certs=self.verify_ssl,
                ssl_show_warn=False
            )
            logger.info("Using Elasticsearch basic authentication.")

    def check_connection(self):
        try:
            return self.client.info()
        except Exception as e:
            logger.error(f"Failed to connect to Elasticsearch: {e}")
            raise

    def bulk_index(self, actions):
        """
        Bulk index a list of actions.
        actions: list of dicts compatible with elasticsearch.helpers.bulk
        """
        if not actions:
            return 0, []

        try:
            success, failed = helpers.bulk(self.client, actions, stats_only=False, raise_on_error=False)
            if failed:
                logger.warning(f"Bulk index had failures: {len(failed)} items failed.")
                for item in failed[:5]:  # Log first 5 failures
                    logger.warning(f"Failure sample: {item}")
            else:
                logger.info(f"Bulk index successful: {success} items.")
            return success, failed
        except Exception as e:
            logger.error(f"Bulk index exception: {e}")
            raise

    def search_hosts(self, index="network-hosts", query=None, size=1000):
        """
        Search for hosts in the network-hosts index.
        """
        if query is None:
            query = {"match_all": {}}

        try:
            resp = self.client.search(index=index, query=query, size=size)
            return [hit["_source"] for hit in resp["hits"]["hits"]]
        except Exception as e:
            logger.error(f"Search failed: {e}")
            return []


def get_es_client():
    return ESClient()
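
A minimal sketch of how a collector might use the client. The index name matches the search_hosts default above; the document shape and id scheme are illustrative, not prescribed by this commit:

from common.es_client import get_es_client

es = get_es_client()
es.check_connection()  # raises early if ES is unreachable

# Actions are plain dicts understood by elasticsearch.helpers.bulk.
actions = [
    {
        "_index": "network-hosts",
        "_id": "192.168.20.15",  # using the IP as a stable document id
        "_source": {"ip": "192.168.20.15", "hostname": "nas.lan"},
    }
]
success, failed = es.bulk_index(actions)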
stacks/network-mcp/collectors/common/logging_config.py (new file, 21 lines)
@@ -0,0 +1,21 @@
import logging
import os
import sys


def setup_logging(name: str = "collector") -> logging.Logger:
    """
    Set up a structured logger writing to stdout.
    """
    logger = logging.getLogger(name)
    level = os.getenv("LOG_LEVEL", "INFO").upper()
    logger.setLevel(level)

    if not logger.handlers:
        handler = logging.StreamHandler(sys.stdout)
        formatter = logging.Formatter(
            '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
        )
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    return logger
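
Usage is one line per module, as the files above do; setting LOG_LEVEL=DEBUG in the environment raises verbosity without code changes (the collector name here is hypothetical):

from common.logging_config import setup_logging

logger = setup_logging("arp_collector")
logger.info("starting collection run")
# e.g. 2025-01-01 00:00:00,000 [INFO] arp_collector: starting collection run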
stacks/network-mcp/collectors/common/nmap_parser.py (new file, 131 lines)
@@ -0,0 +1,131 @@
import subprocess
import xml.etree.ElementTree as ET
import shutil
from typing import List, Dict, Optional
from .logging_config import setup_logging

logger = setup_logging("nmap_parser")


def run_nmap_scan(ips: List[str], extra_args: Optional[List[str]] = None) -> List[Dict]:
    """
    Run nmap on the given IPs and return a list of parsed host dicts.
    """
    if not ips:
        return []

    if not shutil.which("nmap"):
        logger.error("nmap binary not found in PATH")
        return []

    # Default args: -oX - (XML to stdout)
    cmd = ["nmap", "-oX", "-"]
    if extra_args:
        cmd.extend(extra_args)

    # Append IPs
    cmd.extend(ips)

    logger.info(f"Running nmap command: {' '.join(cmd)}")

    try:
        result = subprocess.run(cmd, capture_output=True, text=True, check=True)
        return parse_nmap_xml(result.stdout)
    except subprocess.CalledProcessError as e:
        logger.error(f"Nmap failed: {e.stderr}")
        return []
    except Exception as e:
        logger.error(f"Error running nmap: {e}")
        return []


def parse_nmap_xml(xml_string: str) -> List[Dict]:
    """
    Parse nmap XML output into the internal host/port structure.
    """
    try:
        root = ET.fromstring(xml_string)
    except ET.ParseError as e:
        logger.error(f"Failed to parse Nmap XML: {e}")
        return []

    hosts = []

    for host_node in root.findall("host"):
        ip = None
        mac = None
        hostname = None
        vendor = None

        # Addresses
        for addr in host_node.findall("address"):
            addr_type = addr.get("addrtype")
            if addr_type == "ipv4":
                ip = addr.get("addr")
            elif addr_type == "mac":
                mac = addr.get("addr")
                vendor = addr.get("vendor")

        # Hostnames: pick the first one for now
        hostnames_node = host_node.find("hostnames")
        if hostnames_node is not None:
            hn = hostnames_node.find("hostname")
            if hn is not None:
                hostname = hn.get("name")

        # Ports: keep only open ports
        ports = []
        ports_node = host_node.find("ports")
        if ports_node is not None:
            for port_node in ports_node.findall("port"):
                state_node = port_node.find("state")
                state = state_node.get("state") if state_node is not None else "unknown"

                if state != "open":
                    continue

                port_id = int(port_node.get("portid"))
                protocol = port_node.get("protocol")

                service_node = port_node.find("service")
                service_name = service_node.get("name") if service_node is not None else "unknown"
                product = service_node.get("product") if service_node is not None else None
                version = service_node.get("version") if service_node is not None else None

                service_def = {
                    "name": service_name,
                }
                if product:
                    service_def["product"] = product
                if version:
                    service_def["version"] = version

                ports.append({
                    "port": port_id,
                    "proto": protocol,
                    "state": state,
                    "service": service_def
                })

        # OS detection (basic): take the first osmatch if present
        os_match = None
        os_node = host_node.find("os")
        if os_node is not None:
            os_match_node = os_node.find("osmatch")
            if os_match_node is not None:
                os_match = {
                    "name": os_match_node.get("name"),
                    "accuracy": os_match_node.get("accuracy")
                }

        host_data = {
            "ip": ip,
            "mac": mac,  # may be None when scanning a remote segment
            "hostname": hostname,
            "vendor": vendor,
            "ports": ports,
            "os_match": os_match
        }
        hosts.append(host_data)

    return hosts
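
To illustrate the output shape, a tiny hand-written XML document (abridged; real nmap output carries many more attributes) parses to one host dict:

from common.nmap_parser import parse_nmap_xml

sample = """<?xml version="1.0"?>
<nmaprun>
  <host>
    <address addr="192.168.20.15" addrtype="ipv4"/>
    <hostnames><hostname name="nas.lan"/></hostnames>
    <ports>
      <port protocol="tcp" portid="22">
        <state state="open"/>
        <service name="ssh" product="OpenSSH"/>
      </port>
    </ports>
  </host>
</nmaprun>"""

print(parse_nmap_xml(sample))
# [{'ip': '192.168.20.15', 'mac': None, 'hostname': 'nas.lan', 'vendor': None,
#   'ports': [{'port': 22, 'proto': 'tcp', 'state': 'open',
#              'service': {'name': 'ssh', 'product': 'OpenSSH'}}],
#   'os_match': None}]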
stacks/network-mcp/collectors/common/opnsense_client.py (new file, 105 lines)
@@ -0,0 +1,105 @@
import os
import requests
import ipaddress
from .logging_config import setup_logging

logger = setup_logging("opnsense_client")


class OPNsenseClient:
    def __init__(self):
        self.base_url = os.getenv("OPNSENSE_URL", "https://192.168.1.1").rstrip('/')
        self.api_key = os.getenv("OPNSENSE_API_KEY")
        self.api_secret = os.getenv("OPNSENSE_API_SECRET")
        # Reuses ES_VERIFY_SSL for now; an explicit OPNSENSE_VERIFY_SSL could be added later.
        self.verify_ssl = os.getenv("ES_VERIFY_SSL", "true").lower() == "true"

        if not self.api_key or not self.api_secret:
            logger.warning("OPNSENSE_API_KEY or OPNSENSE_API_SECRET not set. API calls will fail.")

    def _get(self, endpoint, params=None):
        url = f"{self.base_url}{endpoint}"
        try:
            response = requests.get(
                url,
                auth=(self.api_key, self.api_secret),
                verify=self.verify_ssl,
                params=params,
                timeout=10
            )
            response.raise_for_status()
            return response.json()
        except Exception as e:
            logger.error(f"Failed to fetch {url}: {e}")
            return {}

    def get_dhcp_leases_v4(self):
        # OPNsense search endpoints return {"rows": [...], "total": ...}.
        data = self._get("/api/dhcpv4/leases/searchLease")
        return data.get("rows", [])

    def get_arp_table(self):
        # /api/diagnostics/interface/getArp returns the ARP table, either as a
        # bare list or wrapped in {"rows": [...]} depending on the version.
        data = self._get("/api/diagnostics/interface/getArp")
        if isinstance(data, list):
            return data
        return data.get("rows", [])

    def get_dns_overrides(self):
        # Endpoint: /api/unbound/settings/searchHostOverride
        data = self._get("/api/unbound/settings/searchHostOverride")
        return data.get("rows", [])

    def get_vlan_networks(self):
        """
        Build a list of IPv4 networks (CIDRs) from the routing table, grouped by interface description.
        """
        routes = self._get("/api/diagnostics/interface/getRoutes")
        networks = []
        if not isinstance(routes, list):
            return networks

        seen = set()
        for route in routes:
            if route.get("proto") != "ipv4":
                continue
            destination = route.get("destination")
            if not destination or "/" not in destination or destination == "default":
                continue
            desc = route.get("intf_description")
            if not desc:
                continue
            try:
                network = ipaddress.ip_network(destination, strict=False)
            except ValueError:
                continue
            # Skip host routes (/32), which are usually static peers
            if network.prefixlen == 32:
                continue
            # Skip very broad aggregates (shorter than /16)
            if network.prefixlen < 16:
                continue

            key = (desc, str(network))
            if key in seen:
                continue
            seen.add(key)
            networks.append({
                "key": desc,
                "name": desc,
                "cidr": str(network)
            })
        return networks


def get_opnsense_client():
    return OPNsenseClient()
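
Put together, the modules above sketch one discovery pass: enumerate VLANs from OPNsense, scan each with nmap, and bulk-index the results. The scan flags and index name below are illustrative, not part of this commit:

from common.opnsense_client import get_opnsense_client
from common.nmap_parser import run_nmap_scan
from common.es_client import get_es_client

opn = get_opnsense_client()
es = get_es_client()
es.check_connection()

for net in opn.get_vlan_networks():
    # -T4 speeds things up; scanning a whole VLAN can still take a while.
    hosts = run_nmap_scan([net["cidr"]], extra_args=["-T4"])
    actions = [
        {"_index": "network-hosts", "_id": h["ip"], "_source": {**h, "vlan": net["name"]}}
        for h in hosts if h.get("ip")
    ]
    es.bulk_index(actions)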