| #################################### |
| # LogSentinelAI Configuration File # |
| #################################### |
|
|
| # ============================================================================= |
| # Telegram Alert Configuration |
| # ============================================================================= |
| # Enable/Disable Telegram notifications (true/false) |
| # When false, all Telegram alert processing is skipped for better performance |
| TELEGRAM_ENABLED=true |
|
|
| # Telegram Alert Level - Minimum severity level to trigger alerts |
| # Choose from: CRITICAL, HIGH, MEDIUM, LOW, INFO |
| # Only events with this severity level or higher will trigger Telegram alerts |
| # Example: If set to HIGH, only CRITICAL and HIGH events will send alerts |
| # Example: If set to MEDIUM, CRITICAL, HIGH, and MEDIUM events will send alerts |
| TELEGRAM_ALERT_LEVEL=CRITICAL |
|
|
| # Telegram Bot Token (required for alerting to Telegram group) |
| # Get from @BotFather after creating your bot |
| TELEGRAM_TOKEN=YOUR_TELEGRAM_BOT_TOKEN_HERE |
|
|
| # Telegram Chat ID (group or user) |
| # Use @userinfobot or similar to get your group chat ID (negative number for groups) |
| TELEGRAM_CHAT_ID=YOUR_TELEGRAM_CHAT_ID_HERE |
|
|
| # ============================================================================= |
| # Logging Configuration |
| # ============================================================================= |
|
|
| # LOG_LEVEL: Set logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL) |
| # Example: LOG_LEVEL=INFO |
| LOG_LEVEL=INFO |
|
|
| # LOG_FILE: Path to log file (leave empty for console only) |
| # Example: LOG_FILE=logsentinelai.log |
| LOG_FILE=/var/log/logsentinelai.log |
|
|
| # =============================================================================== |
| # API Keys |
| # ============================================================================= |
| # OpenAI API Key (required if using OpenAI provider) |
| OPENAI_API_KEY=YOUR_OPENAI_API_KEY_HERE |
|
|
| # Gemini API Key (required if using Gemini provider) |
GEMINI_API_KEY=YOUR_GEMINI_API_KEY_HERE
|
|
| # ============================================================================= |
| # LLM Configuration |
| # ============================================================================= |
| # LLM Provider - Choose from "ollama", "vllm", "openai", "gemini" |
| LLM_PROVIDER=ollama |
| #LLM_PROVIDER=vllm |
| #LLM_PROVIDER=gemini |
| #LLM_PROVIDER=openai |
|
|
| # LLM Model - Ollama |
| LLM_MODEL_OLLAMA=qwen3:8b |
| #LLM_MODEL_OLLAMA=gemma3:4b |
| #LLM_MODEL_OLLAMA=qwen2.5:1.5b |
| #LLM_MODEL_OLLAMA=qwen3:1.7b |
|
|
| # LLM Model - vLLM |
| LLM_MODEL_VLLM=Qwen/Qwen2.5-1.5B-Instruct |
|
|
| # LLM Model - Gemini |
| LLM_MODEL_GEMINI=gemini-1.5-pro |
| #LLM_MODEL_GEMINI=gemini-2.5-flash-lite |
|
|
| # LLM Model - OpenAI |
| #LLM_MODEL_OPENAI=gpt-4o-mini |
| LLM_MODEL_OPENAI=gpt-4.1-nano |
|
|
# Ollama API Host Configuration (default: http://127.0.0.1:11434/v1)
# (OpenAI API compatibility)
LLM_API_HOST_OLLAMA=http://127.0.0.1:11434/v1
|
|
# vLLM API host (default: http://127.0.0.1:8000/v1)
# (OpenAI API compatibility)
LLM_API_HOST_VLLM=http://127.0.0.1:8000/v1
|
|
# OpenAI API host (default: https://api.openai.com/v1)
LLM_API_HOST_OPENAI=https://api.openai.com/v1
|
|
# Gemini API host
# (OpenAI API compatibility)
LLM_API_HOST_GEMINI=https://generativelanguage.googleapis.com/v1beta/openai/
|
|
| # LLM Generation Parameters |
| # Temperature: Controls randomness (0.0 = deterministic, 1.0 = very random) |
| # Recommended: 0.0-0.1 for log analysis (consistency), 0.7-0.9 for creative tasks |
| LLM_TEMPERATURE=0.1 |
|
|
| # Top-p: Controls diversity via nucleus sampling (0.1-1.0) |
| # Lower values = more focused, Higher values = more diverse |
| LLM_TOP_P=0.3 |
|
|
| # Max Tokens: Maximum number of tokens to generate in response |
| # Controls response length and prevents infinite generation |
| # Recommended: 1024-4096 for log analysis depending on complexity |
| LLM_MAX_TOKENS=4096 |
|
|
| # Show Prompt: Display the full prompt sent to LLM before processing (true/false) |
| # Set to true to see the complete prompt for debugging and transparency |
| # Useful for debugging prompt issues and understanding what's sent to the LLM |
| LLM_SHOW_PROMPT=true |
| |
| # ============================================================================= |
| # Analysis Configuration |
| # ============================================================================= |
# Response Language - Choose from "english", "korean", etc.
| RESPONSE_LANGUAGE=english |
| |
| # Analysis Mode - Choose from "batch", "realtime" |
| # batch: Analyze complete log files (existing functionality) |
| # realtime: Monitor and analyze new log entries as they are written |
| ANALYSIS_MODE=batch |
| |
| # ============================================================================= |
| # Log File Paths |
| # ============================================================================= |
| # Default log file paths used when --log-path is not specified |
| LOG_PATH_HTTPD_ACCESS=/workspace/LogSentinelAI/sample-logs/access-10k.log |
| LOG_PATH_HTTPD_SERVER=/workspace/LogSentinelAI/sample-logs/apache-10k.log |
| LOG_PATH_LINUX_SYSTEM=/workspace/LogSentinelAI/sample-logs/linux-2k.log |
| LOG_PATH_GENERAL_LOG=/var/log/secure |
| |
| # ============================================================================= |
| # Chunk Size Configuration (entries per chunk) |
| # ============================================================================= |
| # CHUNK_SIZE: Number of log entries sent to LLM for analysis in a single request |
| # - Controls analysis quality vs performance balance |
| # - Larger values: Better context understanding, slower processing |
| # - Smaller values: Faster processing, may miss complex patterns |
| # - Recommended: 10-50 depending on log complexity and LLM capacity |
| # - These values are used for both batch and real-time modes |
| # CLI Override: Use --chunk-size argument to override these values |
| # Example: python analysis-linux-system-log.py --chunk-size 20 |
| CHUNK_SIZE_HTTPD_ACCESS=10 |
| CHUNK_SIZE_HTTPD_SERVER=10 |
| CHUNK_SIZE_LINUX_SYSTEM=10 |
| CHUNK_SIZE_GENERAL_LOG=10 |
| |
| # ============================================================================= |
| # Elasticsearch Configuration |
| # ============================================================================= |
| ELASTICSEARCH_HOST=http://localhost:9200 |
| ELASTICSEARCH_USER=elastic |
| ELASTICSEARCH_PASSWORD=password |
| ELASTICSEARCH_INDEX=logsentinelai-analysis |
| |
| # ============================================================================= |
| # Real-time Monitoring Configuration |
| # ============================================================================= |
| # Remote Log Access Mode - Choose from "local", "ssh" |
| # local: Monitor local log files (default) |
| # ssh: Monitor remote log files via SSH connection |
| REMOTE_LOG_MODE=local |
| |
| # SSH Remote Server Configuration (only used when REMOTE_LOG_MODE=ssh) |
| # SSH connection details for remote log monitoring |
| REMOTE_SSH_HOST= |
| REMOTE_SSH_PORT=22 |
| REMOTE_SSH_USER= |
| # SSH Key Authentication (recommended for security) |
| REMOTE_SSH_KEY_PATH= |
| # Password Authentication (less secure, use only if SSH key is not available) |
| REMOTE_SSH_PASSWORD= |
| # SSH Connection Timeout (seconds) |
| REMOTE_SSH_TIMEOUT=10 |
| |
| # Polling interval for checking new log entries (seconds) |
| # - How often to check log files for new content |
| # - Lower values: More responsive, higher CPU usage |
| # - Higher values: Less responsive, lower CPU usage |
| REALTIME_POLLING_INTERVAL=5 |
| |
| # Maximum number of new lines to process at once (I/O efficiency control) |
| # - Limits memory usage and prevents system overload |
| # - Controls how many lines are read from file system per polling cycle |
| # - IMPORTANT: This is different from CHUNK_SIZE (which controls LLM analysis batching) |
| # - If 1000 new lines appear, this setting reads only 50 lines per cycle |
| # - Remaining lines are processed in subsequent polling cycles |
| # - Recommended: 20-100 depending on system resources and log volume |
| REALTIME_MAX_LINES_PER_BATCH=50 |
| |
| # Buffer time to wait for complete log lines (seconds) |
| # - Prevents processing incomplete log lines that are still being written |
| # - When new lines are detected, waits this many seconds before processing |
| # - Ensures log entries are completely written to disk before analysis |
| # - Higher values: More safety against incomplete lines, slower responsiveness |
| # - Lower values: Faster processing, risk of reading partial log entries |
| # - Recommended: 1-5 seconds depending on log writing frequency |
| REALTIME_BUFFER_TIME=2 |
| |
| # Real-time processing mode - Choose from "full", "sampling" |
| # full: Process all accumulated logs sequentially (default) |
| # sampling: Only process the most recent chunk_size logs, discard older ones |
| # CLI Override: Use --processing-mode argument to override this setting |
| # Example: python analysis-linux-system-log.py --mode realtime --processing-mode sampling |
| REALTIME_PROCESSING_MODE=full |
| |
| # Chunk pending timeout (seconds) for forcing pending logs to be processed |
| # - Maximum time to wait for chunk_size logs to accumulate before processing |
| # - If pending logs exist but chunk_size is not reached within this time, process anyway |
| # - Prevents pending logs from waiting indefinitely in low-volume environments |
| # - Set to 0 to disable timeout (wait indefinitely for full chunks) |
| # - Recommended: 600-3600 seconds (10-60 minutes) depending on log volume and analysis urgency |
| REALTIME_CHUNK_PENDING_TIMEOUT=600 |
| |
| # Auto-sampling threshold (applies to full mode) |
| # Automatically switch to sampling if accumulated lines exceed this number |
| # - When pending lines exceed this threshold, automatically switch to sampling mode |
| # - Prevents memory buildup in high-volume log environments |
| # - Works with CHUNK_SIZE: if CHUNK_SIZE=10 and threshold=100, keeps latest 10 lines |
| # CLI Override: Use --sampling-threshold argument to override this setting |
| # Example: python analysis-linux-system-log.py --mode realtime --sampling-threshold 200 |
| REALTIME_SAMPLING_THRESHOLD=100 |
| |
| # FLOW EXPLANATION: |
| # 1. Every REALTIME_POLLING_INTERVAL seconds, check log file for new lines |
| # 2. Read up to REALTIME_MAX_LINES_PER_BATCH lines from file system |
| # 3. Add these lines to pending buffer (internal queue) |
| # 4. When pending buffer reaches CHUNK_SIZE lines, send to LLM for analysis |
| # 5. If pending buffer exceeds REALTIME_SAMPLING_THRESHOLD, apply sampling |
| # 6. If pending logs exist but CHUNK_SIZE not reached within REALTIME_CHUNK_PENDING_TIMEOUT seconds, force process anyway |
| # |
| # Example with current settings: |
| # - Check file every 5 seconds |
| # - Read max 50 lines per check |
| # - Analyze when 10 lines accumulated (CHUNK_SIZE) |
| # - Apply sampling if 100+ lines pending (REALTIME_SAMPLING_THRESHOLD) |
# - Force process pending logs after 600 seconds (10 minutes) timeout (REALTIME_CHUNK_PENDING_TIMEOUT)
| |
| # Note: Real-time monitoring will buffer new log lines until CHUNK_SIZE lines |
| # are accumulated before sending to LLM for analysis. This improves efficiency |
| # by avoiding single-line analysis calls. However, if logs don't reach CHUNK_SIZE |
| # within the timeout period, they will be processed anyway to prevent indefinite waiting. |
|
|
| # ============================================================================= |
| # GeoIP Configuration |
| # ============================================================================= |
|
|
| # Enable GeoIP enrichment of source IP addresses with city/location information |
| # When enabled, source_ips and related fields in analysis results will include city, country, and coordinates (geo_point) |
| GEOIP_ENABLED=true |
|
|
| # Path to MaxMind GeoLite2-City database file (.mmdb format) |
| # Download with: python -m logsentinelai.utils.geoip_downloader |
| # Default: ~/.logsentinelai/GeoLite2-City.mmdb (automatically downloaded if missing) |
| GEOIP_DATABASE_PATH=~/.logsentinelai/GeoLite2-City.mmdb |
|
|
| # Fallback country name for unknown IP addresses |
| GEOIP_FALLBACK_COUNTRY=Unknown |
|
|
| # Include private/internal IP addresses in GeoIP processing |
| # When false, private IPs (192.168.x.x, 10.x.x.x, etc.) are marked as "Private IP" |
| # When true, private IPs are processed through GeoIP database (usually results in "Unknown") |
| GEOIP_INCLUDE_PRIVATE_IPS=false |
|
|
| # Size of GeoIP lookup cache (number of IP addresses to cache) |
| # Higher values reduce database queries but consume more memory |
| GEOIP_CACHE_SIZE=1000 |
|
|