From 583cf68b85b076f2c80e693ac494315058828531 Mon Sep 17 00:00:00 2001 From: Petar Aleksic Date: Sat, 9 Aug 2025 08:02:51 +0200 Subject: [PATCH 1/6] show messages and addresses decoded --- main/core/models/submit_log.py | 41 +++++++++++++++++++ .../templates/web/content/submit_logs.html | 8 ++-- 2 files changed, 46 insertions(+), 3 deletions(-) diff --git a/main/core/models/submit_log.py b/main/core/models/submit_log.py index 689798d..d086173 100644 --- a/main/core/models/submit_log.py +++ b/main/core/models/submit_log.py @@ -27,6 +27,7 @@ ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci; """ +import binascii from django.utils.translation import gettext as _ from django.db import models @@ -47,6 +48,46 @@ class SubmitLog(models.Model): created_at = models.DateTimeField(_("Created At"), null=False, db_index=True) status_at = models.DateTimeField(_("Status At"), null=False) + @property + def decoded_destination_addr(self): + if self.destination_addr: + # Remove the '\x' prefix and decode from hex to bytes, then to UTF-8 string + try: + # The data might already be in bytes format in Python + if isinstance(self.destination_addr, bytes): + return self.destination_addr.decode('utf-8') + # Otherwise, it might be a memoryview or hex string + elif isinstance(self.destination_addr, memoryview): + return self.destination_addr.tobytes().decode('utf-8') + # Handle the case where it's a string with \x prefix + elif isinstance(self.destination_addr, str) and self.destination_addr.startswith('\\x'): + hex_string = self.destination_addr[2:] # Remove \x prefix + return binascii.unhexlify(hex_string).decode('utf-8') + return str(self.destination_addr) + except (UnicodeDecodeError, binascii.Error): + return "Undecodable" + return "N/A" + + @property + def decoded_short_message(self): + if self.short_message: + # Remove the '\x' prefix and decode from hex to bytes, then to UTF-8 string + try: + # The data might already be in bytes format in Python + if isinstance(self.short_message, bytes): + return self.short_message.decode('utf-8') + # Otherwise, it might be a memoryview or hex string + elif isinstance(self.short_message, memoryview): + return self.short_message.tobytes().decode('utf-8') + # Handle the case where it's a string with \x prefix + elif isinstance(self.short_message, str) and self.short_message.startswith('\\x'): + hex_string = self.short_message[2:] # Remove \x prefix + return binascii.unhexlify(hex_string).decode('utf-8') + return str(self.short_message) + except (UnicodeDecodeError, binascii.Error): + return "Undecodable" + return "N/A" + class Meta: db_table = "submit_log" verbose_name = _("Submit Log") diff --git a/main/web/templates/web/content/submit_logs.html b/main/web/templates/web/content/submit_logs.html index 883edae..e365f60 100644 --- a/main/web/templates/web/content/submit_logs.html +++ b/main/web/templates/web/content/submit_logs.html @@ -15,9 +15,10 @@

[header-row <th> changes — cell markup lost in extraction]
@@ -32,8 +33,9 @@

{% for record in submit_logs %}

[data-row <td> changes — cell markup lost in extraction; the row presumably now renders the decoded_* properties instead of the raw column values]

From 8a49de319e96f55800e0c0f543934184df0be78e Mon Sep 17 00:00:00 2001
From: Petar Aleksic
Date: Tue, 12 Aug 2025 11:17:27 +0200
Subject: [PATCH 2/6] fix missing source addr

---
 main/core/models/submit_log.py | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/main/core/models/submit_log.py b/main/core/models/submit_log.py
index d086173..22f929d 100644
--- a/main/core/models/submit_log.py
+++ b/main/core/models/submit_log.py
@@ -88,6 +88,26 @@ def decoded_short_message(self):
                 return "Undecodable"
         return "N/A"
 
+    @property
+    def decoded_source_addr(self):
+        if self.source_addr:
+            # Remove the '\x' prefix and decode from hex to bytes, then to UTF-8 string
+            try:
+                # The data might already be in bytes format in Python
+                if isinstance(self.source_addr, bytes):
+                    return self.source_addr.decode('utf-8')
+                # Otherwise, it might be a memoryview or hex string
+                elif isinstance(self.source_addr, memoryview):
+                    return self.source_addr.tobytes().decode('utf-8')
+                # Handle the case where it's a string with \x prefix
+                elif isinstance(self.source_addr, str) and self.source_addr.startswith('\\x'):
+                    hex_string = self.source_addr[2:]  # Remove \x prefix
+                    return binascii.unhexlify(hex_string).decode('utf-8')
+                return str(self.source_addr)
+            except (UnicodeDecodeError, binascii.Error):
+                return "Undecodable"
+        return "N/A"
+
     class Meta:
         db_table = "submit_log"
         verbose_name = _("Submit Log")
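Taken together, the two patches above add three copies of the same decode routine. A minimal consolidation sketch — `_decode_hex_field` is a hypothetical helper name, not something these patches define — keeps the bytes / memoryview / `\x`-prefixed-hex handling in one place:

```python
import binascii


def _decode_hex_field(value, default="N/A"):
    """Best-effort decode of a bytea-style column value to UTF-8 text.

    Hypothetical helper mirroring the decoded_* properties above: it
    accepts bytes, a memoryview, or a '\\x'-prefixed hex string, and
    returns "Undecodable" when the payload is not valid hex/UTF-8.
    """
    if not value:
        return default
    try:
        if isinstance(value, bytes):
            return value.decode("utf-8")
        if isinstance(value, memoryview):
            return value.tobytes().decode("utf-8")
        if isinstance(value, str) and value.startswith("\\x"):
            return binascii.unhexlify(value[2:]).decode("utf-8")
        return str(value)
    except (UnicodeDecodeError, binascii.Error):
        return "Undecodable"


if __name__ == "__main__":
    print(_decode_hex_field(memoryview(b"+12025550123")))  # -> +12025550123
    print(_decode_hex_field("\\x68656c6c6f"))              # -> hello
    print(_decode_hex_field(None))                         # -> N/A
```

Each model property then reduces to a one-liner such as `return _decode_hex_field(self.source_addr)`.
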
From 3bf4ba732640cd9136d4b5fe632d5cd5d884f969 Mon Sep 17 00:00:00 2001
From: 101t
Date: Mon, 20 Oct 2025 22:35:46 +0300
Subject: [PATCH 3/6] submit_log report fixed, code optimized, README.md updated

---
 README.md                                     |  575 +++-
 config/docker/alpine/Dockerfile               |   48 -
 .../docker/alpine/docker-compose-alpine.yml   |   34 -
 config/docker/jasmin/README.md                |    6 -
 config/docker/jasmin/docker-compose.yml       |   82 -
 config/docker/jasmin/jasmin/config/dlr.cfg    |   75 -
 .../jasmin/jasmin/config/dlrlookupd.cfg       |  142 -
 .../jasmin/jasmin/config/interceptor.cfg      |   57 -
 config/docker/jasmin/jasmin/config/jasmin.cfg |  662 ----
 config/docker/jasmin/jasmin/logs/.gitignore   |    1 -
 .../resource/amqp0-8.stripped.rabbitmq.xml    |  771 -----
 .../jasmin/jasmin/resource/amqp0-9-1.xml      | 2843 -----------------
 config/docker/jasmin/jasmin/store/.gitignore  |    0
 config/docker/jasmin/redis/.gitignore         |    1 -
 config/docker/sms_logger/Dockerfile           |   50 -
 config/docker/sms_logger/docker-entrypoint.sh |   11 -
 config/docker/sms_logger/requirements.txt     |    9 -
 config/docker/sms_logger/sms_logger.py        |  434 ---
 docker-compose.yml                            |   93 +-
 .../templates/web/content/submit_logs.html    |   46 +-
 main/web/views/content/submit_logs.py         |   52 +-
 pyproject.toml                                |    2 +-
 22 files changed, 555 insertions(+), 5439 deletions(-)
 delete mode 100644 config/docker/alpine/Dockerfile
 delete mode 100644 config/docker/alpine/docker-compose-alpine.yml
 delete mode 100644 config/docker/jasmin/README.md
 delete mode 100644 config/docker/jasmin/docker-compose.yml
 delete mode 100644 config/docker/jasmin/jasmin/config/dlr.cfg
 delete mode 100644 config/docker/jasmin/jasmin/config/dlrlookupd.cfg
 delete mode 100644 config/docker/jasmin/jasmin/config/interceptor.cfg
 delete mode 100644 config/docker/jasmin/jasmin/config/jasmin.cfg
 delete mode 100644 config/docker/jasmin/jasmin/logs/.gitignore
 delete mode 100644 config/docker/jasmin/jasmin/resource/amqp0-8.stripped.rabbitmq.xml
 delete mode 100644 config/docker/jasmin/jasmin/resource/amqp0-9-1.xml
 delete mode 100644 config/docker/jasmin/jasmin/store/.gitignore
 delete mode 100644 config/docker/jasmin/redis/.gitignore
 delete mode 100644 config/docker/sms_logger/Dockerfile
 delete mode 100755 config/docker/sms_logger/docker-entrypoint.sh
 delete mode 100644 config/docker/sms_logger/requirements.txt
 delete mode 100644 config/docker/sms_logger/sms_logger.py

diff --git a/README.md b/README.md
index 31ab0f1..ae98b63 100644
--- a/README.md
+++ b/README.md
@@ -1,73 +1,321 @@
 # Jasmin Web Panel

[badge markup lost in extraction — the hunk removes the old centered travis-ci badge block and opens the new centered header block]
-Jasmin Web Application to manage [Jasmin SMS Gateway](https://github.com/jookies/jasmin) +[![Build Status](https://travis-ci.org/101t/jasmin-web-panel.svg?branch=master)](https://travis-ci.org/101t/jasmin-web-panel) +[![Docker Hub](https://img.shields.io/badge/docker-hub-blue.svg)](https://hub.docker.com/u/tarekaec) +[![Python 3.11+](https://img.shields.io/badge/python-3.11+-blue.svg)](https://www.python.org/downloads/) +[![License](https://img.shields.io/badge/license-MIT-green.svg)](LICENSE) -### Table Of Contents: +**A modern, feature-rich web interface for managing [Jasmin SMS Gateway](https://github.com/jookies/jasmin)** -1. [Installing and Deployment](#installing-and-deployment) - - [Installation](#installation) - - [Deployment with NGiNX and Systemd](#deployment-with-nginx-and-systemd) - - [Deployment using Docker](#deployment-using-docker) - - [Submit Log](#submit-log) -2. [Release Notes](#release-notes) -3. [Tracking Issue](#tracking-issue) -4. [Contact Us](#contacts) +[Features](#-features) • [Quick Start](#-quick-start) • [Installation](#-installation) • [Docker](#-docker-deployment) • [Support](#-support) -## Installing and Deployment +
-Before starting please make sure you have installed and running [Jasmin SMS Gateway](http://docs.jasminsms.com/en/latest/installation/index.html) on your server. +--- -### Installation +## 📋 Table of Contents -Download and Extract folder We recommended installing python dependencies in `virtualenv` +- [Overview](#-overview) +- [Features](#-features) +- [Prerequisites](#-prerequisites) +- [Quick Start](#-quick-start) +- [Installation](#-installation) + - [Manual Installation](#manual-installation) + - [Docker Deployment](#-docker-deployment) + - [Docker Compose](#docker-compose-deployment) +- [Configuration](#-configuration) +- [Production Deployment](#-production-deployment) +- [Submit Log Integration](#-submit-log-integration) +- [Default Credentials](#-default-credentials) +- [Troubleshooting](#-troubleshooting) +- [Support](#-support) -Install dependencies: +--- -> This version using `python >= 3.11` make sure you have installed on your system. +## 🎯 Overview -go to `jasmin-web-panel/` and run +Jasmin Web Panel is a comprehensive web-based management interface for [Jasmin SMS Gateway](https://github.com/jookies/jasmin). Built with Django and modern web technologies, it provides an intuitive dashboard to configure, monitor, and manage your SMS operations efficiently. -```sh -cd jasmin-web-panel/ +--- + +## ✨ Features + +### Core Functionality +- 🚀 **Dashboard**: Real-time statistics and system health monitoring +- 👥 **User Management**: Create and manage users with role-based access control +- 📡 **SMPP Connectors**: Configure and monitor SMPP client/server connections +- 🌐 **HTTP API**: Manage HTTP connectors for sending SMS via REST API +- 🔀 **Message Routing**: Define routing rules and filters for message delivery +- 📨 **MO/MT Routers**: Configure Mobile Originated and Mobile Terminated message routing + +### Monitoring & Analytics +- 📊 **Submit Logs**: Comprehensive message tracking with advanced search and filtering + - Search by Message ID, Source/Destination address, UID, and content + - Filter by status: Success (`ESME_ROK`, `ESME_RINVNUMDESTS`), Failed (`ESME_RDELIVERYFAILURE`), Unknown + - Real-time statistics with color-coded status badges +- 🔍 **Service Monitoring**: Monitor Jasmin gateway service health +- 📈 **Real-time Status**: Live SMPP connector status monitoring + +### Advanced Features +- 🔧 **RESTful API**: Programmatic access to all management functions +- ⚡ **Rate Limiting**: Configure throughput limits per user/connector +- 🔒 **Multi-tenancy**: Manage multiple clients/users +- 📝 **Audit Logging**: Track all administrative actions +- 🌍 **Internationalization**: Multi-language support ready +- 📱 **Responsive Design**: Mobile-friendly interface + +--- + +## 📦 Prerequisites + +### Required Components +- **[Jasmin SMS Gateway](http://docs.jasminsms.com/en/latest/installation/index.html)**: v0.9+ installed and running +- **Python**: 3.11 or higher +- **Database**: PostgreSQL 12+ (recommended) or MySQL 8.0+ +- **Redis**: 6.0+ (for caching and Celery) +- **RabbitMQ**: 3.10+ (for message queuing) + +### System Requirements +- **OS**: Linux (Ubuntu 20.04+, Debian 11+, CentOS 8+) +- **RAM**: Minimum 2GB (4GB+ recommended for production) +- **Disk**: 10GB+ free space +- **Network**: Connectivity to Jasmin telnet interface (default: port 8990) + +--- + +## 🚀 Quick Start + +### Using Docker Compose (Recommended) + +```bash +# Clone the repository +git clone https://github.com/101t/jasmin-web-panel.git +cd jasmin-web-panel + +# Copy and configure 
environment file +cp sample.env .env +# Edit .env with your settings + +# Start all services +docker compose up -d + +# Access the web interface +open http://localhost:8999 +``` + +**Default credentials**: `admin` / `secret` ⚠️ **Change immediately after first login!** + +--- + +## 💻 Installation + +### Manual Installation + +#### 1. Clone and Setup Environment + +```bash +# Clone repository +git clone https://github.com/101t/jasmin-web-panel.git +cd jasmin-web-panel + +# Create virtual environment (recommended) +python3 -m venv env +source env/bin/activate # On Windows: env\Scripts\activate + +# Upgrade pip and install build tools pip install --upgrade pip wheel uv + +# Install dependencies uv pip install -r pyproject.toml --extra=prod -cp sample.env .env ``` -Preparing your `database` by running migrate commads: +#### 2. Configure Application -```sh -python manage.py migrate -python manage.py samples -python manage.py collectstatic --no-input +```bash +# Copy sample environment file +cp sample.env .env + +# Edit .env with your configuration +nano .env # or use your preferred editor ``` -These commands used in production server, also you may edit **Jasmin SMS Gateway** credential connection +**Essential configuration**: -```sh +```ini +# Django Settings +DEBUG=False # Always False in production +SECRET_KEY=your-very-long-random-secret-key-here +ALLOWED_HOSTS=yourdomain.com,www.yourdomain.com + +# Database +PRODB_URL=postgres://username:password@localhost:5432/jasmin_web_db + +# Jasmin Gateway Connection TELNET_HOST=127.0.0.1 TELNET_PORT=8990 TELNET_USERNAME=jcliadmin TELNET_PW=jclipwd TELNET_TIMEOUT=10 + +# Redis & Celery +REDIS_URL=redis://localhost:6379/0 +CELERY_BROKER_URL=amqp://guest:guest@localhost:5672// + +# Submit Log Feature +SUBMIT_LOG=True +``` + +#### 3. Initialize Database + +```bash +# Run migrations +python manage.py migrate + +# Load sample data (optional) +python manage.py samples + +# Collect static files +python manage.py collectstatic --no-input + +# Create superuser (optional) +python manage.py createsuperuser +``` + +#### 4. Run Development Server + +```bash +python manage.py runserver 0.0.0.0:8000 ``` -for production make sure `DEBUG=False` in `.env` file to ensure security. -You may run project manually +Access the application at `http://localhost:8000` + +--- + +## 🐳 Docker Deployment + +### Using Pre-built Image + +```bash +# Pull the latest image +docker pull tarekaec/jasmin_web_panel:1.4 + +# Configure environment +cp sample.env .env +# Edit .env with your settings + +# Run container +docker run -d \ + --name jasmin-web \ + -p 8999:8000 \ + --env-file .env \ + -v ./public:/app/public \ + tarekaec/jasmin_web_panel:1.4 +``` + +### Building Custom Image + +```bash +# Build from Dockerfile +docker build -f config/docker/slim/Dockerfile -t jasmin_web_panel:custom . 
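+
+# Optional (untested sketch; standard `docker buildx` flags): cross-build
+# for the ARM64 case described below:
+#   docker buildx build --platform linux/arm64 \
+#     -f config/docker/slim/Dockerfile -t jasmin_web_panel:custom .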
+ +# Run your custom image +docker run -d \ + --name jasmin-web \ + -p 8999:8000 \ + --env-file .env \ + jasmin_web_panel:custom +``` + +### Docker Compose Deployment + +Full stack deployment with all dependencies: + +```bash +# Ensure .env is configured +cp sample.env .env + +# Start all services +docker compose up -d + +# View logs +docker compose logs -f jasmin-web + +# Check service status +docker compose ps -```sh -python manage.py runserver +# Stop all services +docker compose down ``` -### Deployment with `NGiNX and Systemd` +**Services included**: +- `jasmin-web`: Web application (port 8999) +- `jasmin-celery`: Background task processor +- `db`: PostgreSQL database +- `redis`: Redis cache +- `rabbit-mq`: RabbitMQ message broker +- `jasmin`: Jasmin SMS Gateway (ports 2775, 8990, 1401) +- `sms_logger`: SMS submit log collector + +#### ARM64/AArch64 Support + +For ARM-based systems: + +1. Comment out line 38 in `config/docker/slim/Dockerfile`: + ```dockerfile + # ENV LD_PRELOAD /usr/lib/x86_64-linux-gnu/libjemalloc.so.2 + ``` + +2. Start services: + ```bash + docker compose up -d + ``` + +--- + +## ⚙️ Configuration + +### Environment Variables Reference + +| Variable | Description | Default | Required | +|----------|-------------|---------|----------| +| `DEBUG` | Enable debug mode | `False` | ✅ | +| `SECRET_KEY` | Django secret key | - | ✅ | +| `ALLOWED_HOSTS` | Allowed hosts | `*` | ✅ | +| `PRODB_URL` | PostgreSQL URL | - | ✅ | +| `REDIS_URL` | Redis URL | `redis://redis:6379/0` | ✅ | +| `CELERY_BROKER_URL` | RabbitMQ URL | `amqp://guest:guest@rabbit-mq:5672//` | ✅ | +| `TELNET_HOST` | Jasmin telnet host | `127.0.0.1` | ✅ | +| `TELNET_PORT` | Jasmin telnet port | `8990` | ✅ | +| `TELNET_USERNAME` | Jasmin admin username | `jcliadmin` | ✅ | +| `TELNET_PW` | Jasmin admin password | `jclipwd` | ✅ | +| `SUBMIT_LOG` | Enable submit log tracking | `False` | ❌ | + +### Jasmin Gateway Configuration + +Ensure Jasmin is configured properly: + +1. Enable `submit_sm_resp` publishing in `jasmin.cfg`: + ```ini + [sm-listener] + publish_submit_sm_resp = True + ``` + +2. Restart Jasmin: + ```bash + systemctl restart jasmin + ``` + +--- + +## 🚀 Production Deployment -> Make sure you have installed `gunicorn` using `pip`. +### Nginx & Systemd Setup -Navigate to `/etc/systemd/system` and create new service called `jasmin-web.service` +#### 1. Create Systemd Service + +Create `/etc/systemd/system/jasmin-web.service`: ```ini [Unit] @@ -78,169 +326,226 @@ After=network.target postgresql.service [Service] Type=simple SyslogIdentifier=jasminwebpanel -PermissionsStartOnly=true -User=username -Group=username -Environment="DJANGO_SETTINGS_MODULE=config.settings.pro" +User=www-data +Group=www-data WorkingDirectory=/opt/jasmin-web-panel -ExecStart=/opt/jasmin-web-panel/env/bin/gunicorn --bind 127.0.0.1:8000 config.wsgi -w 3 --timeout=120 --log-level=info -StandardOutput=file:/opt/jasmin-web-panel/logs/gunicorn.log -StandardError=file:/opt/jasmin-web-panel/logs/gunicorn_error.log -StandardOutput=journal+console +Environment="DJANGO_SETTINGS_MODULE=config.settings.pro" +ExecStart=/opt/jasmin-web-panel/env/bin/gunicorn \ + --bind 127.0.0.1:8000 \ + --workers 4 \ + --timeout 120 \ + --log-level info \ + --access-logfile /opt/jasmin-web-panel/logs/gunicorn.log \ + --error-logfile /opt/jasmin-web-panel/logs/gunicorn_error.log \ + config.wsgi:application Restart=on-failure +RestartSec=10 [Install] WantedBy=multi-user.target ``` -Reload systemd +#### 2. 
Enable and Start Service -```sh +```bash sudo systemctl daemon-reload -``` - -Now, you can do: - -```sh sudo systemctl enable jasmin-web.service sudo systemctl start jasmin-web.service -``` - -To ensure web app running without issue: - -```sh sudo systemctl status jasmin-web.service ``` -For NGiNX go to `/etc/nginx/sites-available` and create a new file `jasmin_web` +#### 3. Configure Nginx + +Create `/etc/nginx/sites-available/jasmin_web`: ```nginx -upstream jasmin_web{ +upstream jasmin_web { server 127.0.0.1:8000; } server { listen 80; + server_name sms.yourdomain.com; # Replace with your domain charset utf-8; - # server_name sms.example.com; - server_name _; # for IP Address access - client_body_timeout 500; - client_header_timeout 500; - keepalive_timeout 500 500; - send_timeout 30; + + # Security headers + add_header X-Frame-Options "SAMEORIGIN" always; + add_header X-Content-Type-Options "nosniff" always; + add_header X-XSS-Protection "1; mode=block" always; + + # Logging access_log /var/log/nginx/jasmin_web_access.log combined; error_log /var/log/nginx/jasmin_web_error.log; - + + # Static files + location /static/ { + alias /opt/jasmin-web-panel/public/static/; + expires 30d; + add_header Cache-Control "public, immutable"; + } + + # Media files + location /media/ { + alias /opt/jasmin-web-panel/public/media/; + } + + # Proxy to Django location / { proxy_pass http://jasmin_web; proxy_http_version 1.1; - proxy_read_timeout 86400; - proxy_redirect off; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "upgrade"; proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Host $server_name; - proxy_max_temp_file_size 1600m; proxy_buffering off; - proxy_request_buffering on; client_max_body_size 20M; - client_body_buffer_size 256K; - } - - location ^~ /media/ { - root /opt/jasmin-web-panel/public/; - add_header Accept-Ranges bytes; - } - location ^~ /static/ { - root /opt/jasmin-web-panel/public/; - add_header Pragma public; - add_header Cache-Control "public"; - expires 30d; } } ``` -> Note: Don't forget to replace `sms.example.com` with your real domain - -Once you are done, test and restart the Nginx Service with: +#### 4. Enable Nginx Configuration -```sh -ln -s /etc/nginx/sites-available/jasmin_web /etc/nginx/sites-enabled/jasmin_web +```bash +sudo ln -s /etc/nginx/sites-available/jasmin_web /etc/nginx/sites-enabled/ sudo nginx -t -sudo nginx -s reload -# or sudo service nginx restart -# or sudo systemctl restart nginx +sudo systemctl reload nginx ``` -### Login information: +#### 5. Setup SSL (Recommended) -```shell -Username: admin -Password: secret # please change the default password to avoid the security issue +```bash +sudo apt install certbot python3-certbot-nginx +sudo certbot --nginx -d sms.yourdomain.com ``` -## Deployment using Docker +--- -You could download the built image on [docker hub](https://hub.docker.com/u/tarekaec): +## 📊 Submit Log Integration -```shell -docker pull tarekaec/jasmin_web_panel -``` +Track all SMS messages submitted through Jasmin Gateway with detailed status information. -also, you could build it on your local machine by navigating to the project directory +### Setup Instructions -```shell -docker build -f config/docker/slim/Dockerfile -t jasmin_web_panel:latest . -``` +1. 
**Enable in configuration**: + ```ini + SUBMIT_LOG=True + ``` -You need to configure the environment variable in `.env` file +2. **Configure SMS Logger**: + ```ini + DB_HOST=db + DB_DATABASE=jasmin + DB_USER=jasmin + DB_PASS=jasmin + DB_TABLE=submit_log + ``` -```shell -DJANGO_SETTINGS_MODULE=config.settings.pro -PRODB_URL=postgres://username:strong_password@postgre_hostname:5432/jasmin_web_db -``` - -to start docker container +### Features -```shell -docker stack deploy -c docker-compose.yml jasmin1 -``` +- ✅ **Real-time Tracking**: Monitor message submission and delivery status +- 🔍 **Advanced Search**: Search by Message ID, addresses, UID, or content +- 🎯 **Status Filtering**: + - Success: `ESME_ROK`, `ESME_RINVNUMDESTS` + - Failed: `ESME_RDELIVERYFAILURE` + - Unknown: All other status codes +- 📈 **Statistics Dashboard**: View total, success, failed, and unknown counts +- 🎨 **Color-coded Badges**: Visual status identification +- 📄 **Pagination**: Handle large volumes efficiently -you could check service on terminal +--- -```shell -docker service ls | grep jasmin -``` +## 🔐 Default Credentials -## Deployment using Docker Compose (Works with AArch64 or ARM64) +⚠️ **SECURITY WARNING**: Change default credentials immediately after first login! -You need to configure the environment variable in `.env` file -You also need to comment line 38 of "config/docker/slim/Dockerfile" (ENV LD_PRELOAD /usr/lib/x86_64-linux-gnu/libjemalloc.so.2) -Then start docker container in detach mode. You can remove "-d" if you want to see logs ``` -docker compose up -d +Username: admin +Password: secret ``` -Then check docker containers +### Change Password + +**Via Web Interface**: +1. Log in with default credentials +2. Navigate to **Profile** → **Change Password** +3. Enter new secure password + +**Via Command Line**: +```bash +python manage.py changepassword admin ``` -docker ps + +--- + +## 🔧 Troubleshooting + +### Cannot connect to Jasmin Gateway + +**Solutions**: +- Verify Jasmin is running: `systemctl status jasmin` +- Check telnet connectivity: `telnet localhost 8990` +- Confirm `TELNET_*` settings match Jasmin configuration +- Ensure firewall allows port 8990 + +### Submit logs not appearing + +**Solutions**: +- Verify `SUBMIT_LOG=True` in `.env` +- Check SMS Logger service: `docker compose ps sms_logger` +- Confirm `publish_submit_sm_resp = True` in `jasmin.cfg` +- Check logs: `docker compose logs sms_logger` + +### Static files not loading + +**Solutions**: +```bash +python manage.py collectstatic --no-input --clear +sudo chown -R www-data:www-data /opt/jasmin-web-panel/public/ +sudo nginx -t && sudo systemctl reload nginx ``` -## Submit Log +### View Application Logs -To work with Submit Log you need to install and configure [Submit Log](https://github.com/101t/jasmin-submit-logs) service, make sure you have `SUBMIT_LOG` (default `False`) in environment variable: +```bash +# Docker Compose +docker compose logs -f jasmin-web -```shell -SUBMIT_LOG=True +# Systemd +sudo journalctl -u jasmin-web.service -f ``` -## Tracking Issue +--- + +## 💬 Support + +### Community Support + +- **Telegram**: Join our community → [https://t.me/jasminwebpanel](https://t.me/jasminwebpanel) +- **GitHub Issues**: [Report bugs or request features](https://github.com/101t/jasmin-web-panel/issues) +- **Email**: [tarek.it.eng@gmail.com](mailto:tarek.it.eng@gmail.com) + +### Contributing + +We welcome contributions! To contribute: + +1. Fork the repository +2. 
Create a feature branch: `git checkout -b feature/amazing-feature` +3. Commit your changes: `git commit -m 'Add amazing feature'` +4. Push to the branch: `git push origin feature/amazing-feature` +5. Open a Pull Request + +--- + +## 📄 License + +This project is licensed under the MIT License. See [LICENSE](LICENSE) file for details. + +--- + +
-You may submit issue [here](https://github.com/101t/jasmin-web-panel/issues) +**Made with ❤️ for the Jasmin SMS Gateway community** -## Contacts +[⬆ Back to Top](#jasmin-web-panel) -For question and suggestion: [tarek.it.eng@gmail.com](mailto:tarek.it.eng@gmail.com), Join Telegram Channel: [https://t.me/jasminwebpanel](https://t.me/jasminwebpanel), all suggestion and questions are welcomed. +
diff --git a/config/docker/alpine/Dockerfile b/config/docker/alpine/Dockerfile deleted file mode 100644 index b61871c..0000000 --- a/config/docker/alpine/Dockerfile +++ /dev/null @@ -1,48 +0,0 @@ -FROM alpine:3.11 - -ENV PYTHONDONTWRITEBYTECODE 1 -ENV PYTHONUNBUFFERED 1 -ENV JASMIN_HOME=/jasmin -ENV PATH="${PATH}:/jasmin" - -# RUN mkdir /jasmin -RUN addgroup -S jasmin && adduser -S jasmin -G jasmin -h $JASMIN_HOME - -#RUN apk del busybox-extras -RUN apk --update --no-cache upgrade -RUN apk add python3 --repository=http://dl-cdn.alpinelinux.org/alpine/v3.11/main && ln -sf python3 /usr/bin/python -# RUN apk search busybox-extras -RUN apk add busybox-extras -# RUN busybox --list -# RUN apk add --no-cache bash curl nmap apache2-utils bind-tools tcpdump mtr iperf3 strace tree busybox-extras netcat-openbsd -RUN echo alias telnet='busybox-extras telnet' >> .bashrc -RUN telnet google.com 80 - -RUN apk add --update build-base git gcc cmake py3-setuptools py3-pip python3-dev bash - -# RUN apk add --no-cache bash - -WORKDIR $JASMIN_HOME - -USER jasmin - -RUN mkdir -p $JASMIN_HOME/public/media -RUN mkdir -p $JASMIN_HOME/public/static - -# RUN chown -R jasmin:jasmin $JASMIN_HOME/ - -COPY --chown=jasmin:jasmin ./requirements.txt $JASMIN_HOME/requirements.txt - -ENV PATH="${PATH}:/jasmin/.local/bin" - -RUN pip3 install --upgrade pip && pip3 install -r requirements.txt - -COPY --chown=jasmin:jasmin . $JASMIN_HOME - -COPY --chown=jasmin:jasmin ./docker-entrypoint.sh docker-entrypoint.sh - -# RUN chown -R jasmin:jasmin $JASMIN_HOME/ - -# USER root - -ENTRYPOINT ["docker-entrypoint.sh"] \ No newline at end of file diff --git a/config/docker/alpine/docker-compose-alpine.yml b/config/docker/alpine/docker-compose-alpine.yml deleted file mode 100644 index d30b958..0000000 --- a/config/docker/alpine/docker-compose-alpine.yml +++ /dev/null @@ -1,34 +0,0 @@ -version: '3.7' - -services: - jasmin_web: - image: tarekaec/jasmin_web_panel:1.0-alpine - ports: - - "8000:8000" - deploy: - replicas: 1 - env_file: - - .env - environment: - JASMIN_PORT: 8000 - healthcheck: - disable: true - volumes: - - ./public:/web/public - # entrypoint: /jasmin/docker-entrypoint.sh - jasmin_celery: - image: tarekaec/jasmin_web_panel:1.0-alpine - deploy: - replicas: 1 - env_file: - - .env - environment: - DEBUG: 0 - healthcheck: - disable: true - depends_on: - - jasmin_redis - entrypoint: /jasmin/celery_run.sh - jasmin_redis: - image: redis:alpine - tty: true diff --git a/config/docker/jasmin/README.md b/config/docker/jasmin/README.md deleted file mode 100644 index 1c17067..0000000 --- a/config/docker/jasmin/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Jasmin SMS Gateway -This is the docker version of jasmin sms gateway - -```shell -docker stack deploy -c config/docker/jasmin/docker-compose.yml jasmin -``` \ No newline at end of file diff --git a/config/docker/jasmin/docker-compose.yml b/config/docker/jasmin/docker-compose.yml deleted file mode 100644 index ff78140..0000000 --- a/config/docker/jasmin/docker-compose.yml +++ /dev/null @@ -1,82 +0,0 @@ -version: "3" - -services: - redis: - image: redis:alpine - restart: always - volumes: - - /data/jasmin/redis:/data - healthcheck: - test: redis-cli ping | grep PONG - deploy: - resources: - limits: - cpus: '0.2' - memory: 128M - - rabbit-mq: - image: rabbitmq:3.10-management-alpine - restart: always - volumes: - - /data/jasmin/rabbitmq:/var/lib/rabbitmq - healthcheck: - test: rabbitmq-diagnostics -q ping - deploy: - resources: - limits: - cpus: '0.5' - memory: 525M - - jasmin: - image: 
tarekaec/jasmin:0.10.13 -# command: > -# bash -c " -# sed -i "s/.*publish_submit_sm_resp\s*=.*/publish_submit_sm_resp=True/g" /etc/jasmin/jasmin.cfg -# /docker-entrypoint.sh -# " - ports: - - '${FORWARD_JASMIN_SMPP_PORT:-2776}:2775' - - '${FORWARD_JASMIN_CLI_PORT:-8991}:8990' - - '${FORWARD_JASMIN_HTTP_PORT:-1402}:1401' - volumes: - # - /data/jasmin/jasmin:/usr/jasmin/jasmin - - /data/jasmin/jasmin_config:/etc/jasmin - - /data/jasmin/jasmin_logs:/var/log/jasmin - - /data/jasmin/jasmin_resource:/etc/jasmin/resource - - /data/jasmin/jasmin_store:/etc/jasmin/store - depends_on: - - redis - - rabbit-mq - environment: - REDIS_CLIENT_HOST: ${REDIS_CLIENT_HOST:-redis} - REDIS_CLIENT_PORT: ${REDIS_CLIENT_PORT:-6379} - AMQP_BROKER_HOST: ${AMQP_BROKER_HOST:-rabbit-mq} - AMQP_BROKER_PORT: ${AMQP_BROKER_PORT:-5672} - ENABLE_PUBLISH_SUBMIT_SM_RESP: ${ENABLE_PUBLISH_SUBMIT_SM_RESP:-1} - RESTAPI_MODE: ${RESTAPI_MODE:-0} - deploy: - restart_policy: - condition: on-failure - resources: - limits: - cpus: '1' - memory: 256M - sms_logger: - image: tarekaec/jasmin_log:1.1 - volumes: - - /data/jasmin/jasmin_resource:/app/resource - environment: - DB_TYPE_MYSQL: ${DB_TYPE_MYSQL:-0} - AMQP_BROKER_HOST: ${AMQP_BROKER_HOST:-rabbit-mq} - AMQP_BROKER_PORT: ${AMQP_BROKER_PORT:-5672} - AMQP_SPEC_FILE: '/app/resource/amqp0-9-1.xml' - DB_HOST: ${DB_HOST:-172.17.0.1} - DB_DATABASE: ${DB_DATABASE:-jasmin} - DB_TABLE: ${DB_TABLE:-submit_log} - DB_USER: ${DB_USER:-jasmin} - DB_PASS: ${DB_PASS:-jasmin} - depends_on: - - rabbit-mq - restart: on-failure - healthcheck: - disable: true \ No newline at end of file diff --git a/config/docker/jasmin/jasmin/config/dlr.cfg b/config/docker/jasmin/jasmin/config/dlr.cfg deleted file mode 100644 index 060b236..0000000 --- a/config/docker/jasmin/jasmin/config/dlr.cfg +++ /dev/null @@ -1,75 +0,0 @@ -# -# This is the Jasmin DLR Daemon configuration file. -# DLR Daemon will start DLR throwers (http+smpp) and connect to SMPPServerPB -# -# For any modifications to this file, refer to Jasmin Documentation. -# If that does not help, post your question on Jasmin's web forum -# hosted at Google Groups: https://groups.google.com/group/jasmin-sms-gateway -# -# Do NOT simply read the instructions in here without understanding -# what they do. They're here only as hints or reminders. If you are unsure -# consult the online docs. - -[dlr-thrower] -# The following directives define the process of delivering delivery-receipts through http to third party -# application, it is explained in "HTTP API" documentation -# Sets socket timeout in seconds for outgoing client http connections. -#http_timeout = 30 -# Define how many seconds should pass within the queuing system for retrying a failed throw. -#retry_delay = 30 -# Define how many retries should be performed for failing throws of DLR. -#max_retries = 3 - -# Specify the pdu type to consider when throwing a receipt through SMPPs, possible values: -# - data_sm -# - deliver_sm (default pdu) -#dlr_pdu = deliver_sm - -# Specify the server verbosity level. 
-# This can be one of: -# NOTSET (disable logging) -# DEBUG (a lot of information, useful for development/testing) -# INFO (moderately verbose, what you want in production probably) -# WARNING (only very important / critical messages and errors are logged) -# ERROR (only errors / critical messages are logged) -# CRITICAL (only critical messages are logged) -#log_level = INFO - -# Specify the log file path -#log_file = /var/log/jasmin/dlr-thrower.log - -# When to rotate the log file, possible values: -# S: Seconds -# M: Minutes -# H: Hours -# D: Days -# W0-W6: Weekday (0=Monday) -# midnight: Roll over at midnight -#log_rotate = W6 - -# The following directives define logging patterns including: -# - log_format: using python logging's attributes -# refer to https://docs.python.org/2/library/logging.html#logrecord-attributes -# -log_date_format: using python strftime formating directives -# refer to https://docs.python.org/2/library/time.html#time.strftime -#log_format = %(asctime)s %(levelname)-8s %(process)d %(message)s -#log_date_format = %Y-%m-%d %H:%M:%S - -[smpp-server-pb-client] -# The following directives define client connector to SMPPServerPB -#host = 127.0.0.1 -#port = 14000 -#username = smppsadmin -#password = smppspwd - -[amqp-broker] -# The following directives define the way how Jasmin is connecting to the AMQP Broker, -# default values must work with a freshly installed RabbitMQ server. -#host = 127.0.0.1 -host = rabbit-mq -vhost = / -spec = /etc/jasmin/resource/amqp0-9-1.xml -port = 5672 -username = guest -password = guest -#heartbeat = 0 \ No newline at end of file diff --git a/config/docker/jasmin/jasmin/config/dlrlookupd.cfg b/config/docker/jasmin/jasmin/config/dlrlookupd.cfg deleted file mode 100644 index 241edac..0000000 --- a/config/docker/jasmin/jasmin/config/dlrlookupd.cfg +++ /dev/null @@ -1,142 +0,0 @@ -# -# This is the Jasmin DLR Lookup Daemon configuration file. -# DLR Lookup Daemon will fetch dlr mappings from Redis and publish DLRContent -# to the right AMQP route. -# -# For any modifications to this file, refer to Jasmin Documentation. -# If that does not help, post your question on Jasmin's web forum -# hosted at Google Groups: https://groups.google.com/group/jasmin-sms-gateway -# -# Do NOT simply read the instructions in here without understanding -# what they do. They're here only as hints or reminders. If you are unsure -# consult the online docs. - -[amqp-broker] -# The following directives define the way how Jasmin is connecting to the AMQP Broker, -# default values must work with a freshly installed RabbitMQ server. -#host = 127.0.0.1 -host = rabbit-mq -vhost = / -spec = /etc/jasmin/resource/amqp0-9-1.xml -port = 5672 -username = guest -password = guest -#heartbeat = 0 - -# Specify the server verbosity level. 
-# This can be one of: -# NOTSET (disable logging) -# DEBUG (a lot of information, useful for development/testing) -# INFO (moderately verbose, what you want in production probably) -# WARNING (only very important / critical messages and errors are logged) -# ERROR (only errors / critical messages are logged) -# CRITICAL (only critical messages are logged) -#log_level = INFO - -# Specify the log file path -#log_file = /var/log/jasmin/amqp-client.log - -# When to rotate the log file, possible values: -# S: Seconds -# M: Minutes -# H: Hours -# D: Days -# W0-W6: Weekday (0=Monday) -# midnight: Roll over at midnight -#log_rotate = W6 - -# The following directives define logging patterns including: -# - log_format: using python logging's attributes -# refer to https://docs.python.org/2/library/logging.html#logrecord-attributes -# -log_date_format: using python strftime formating directives -# refer to https://docs.python.org/2/library/time.html#time.strftime -#log_format = %(asctime)s %(levelname)-8s %(process)d %(message)s -#log_date_format = %Y-%m-%d %H:%M:%S - -#connection_loss_retry = True -#connection_failure_retry = True -#connection_loss_retry_delay = 10 -#connection_loss_failure_delay = 10 - -[redis-client] -# The following directives define the way how Jasmin is connecting to the redis server, -# default values must work with a freshly installed redis server. -#host = 127.0.0.1 -#port = 6379 -#dbid = 0 -#password = None -#poolsize = 10 - -# Specify the server verbosity level. -# This can be one of: -# NOTSET (disable logging) -# DEBUG (a lot of information, useful for development/testing) -# INFO (moderately verbose, what you want in production probably) -# WARNING (only very important / critical messages and errors are logged) -# ERROR (only errors / critical messages are logged) -# CRITICAL (only critical messages are logged) -#log_level = INFO - -# Specify the log file path -#log_file = /var/log/jasmin/redis-client.log - -# When to rotate the log file, possible values: -# S: Seconds -# M: Minutes -# H: Hours -# D: Days -# W0-W6: Weekday (0=Monday) -# midnight: Roll over at midnight -#log_rotate = W6 - -# The following directives define logging patterns including: -# - log_format: using python logging's attributes -# refer to https://docs.python.org/2/library/logging.html#logrecord-attributes -# -log_date_format: using python strftime formating directives -# refer to https://docs.python.org/2/library/time.html#time.strftime -#log_format = %(asctime)s %(levelname)-8s %(process)d %(message)s -#log_date_format = %Y-%m-%d %H:%M:%S - -[dlr] -# DLRLookup process id -pid = dlrlookupd-01 - -# DLRLookup mechanism configuration -#dlr_lookup_retry_delay = 10 -#dlr_lookup_max_retries = 2 - -# If smpp_receipt_on_success_submit_sm_resp is True, every connected user to smpp server will -# receive a receipt (data_sm or deliver_sm) whenever a submit_sm_resp is received -# for a message he sent and requested receipt for it. -#smpp_receipt_on_success_submit_sm_resp = False - -# Specify the server verbosity level. 
-# This can be one of: -# NOTSET (disable logging) -# DEBUG (a lot of information, useful for development/testing) -# INFO (moderately verbose, what you want in production probably) -# WARNING (only very important / critical messages and errors are logged) -# ERROR (only errors / critical messages are logged) -# CRITICAL (only critical messages are logged) -#log_level = INFO - -# Specify the log file path -#log_file = /var/log/jasmin/messages.log - -# When to rotate the log file, possible values: -# S: Seconds -# M: Minutes -# H: Hours -# D: Days -# W0-W6: Weekday (0=Monday) -# midnight: Roll over at midnight -#log_rotate = midnight - -# The following directives define logging patterns including: -# - log_format: using python logging's attributes -# refer to https://docs.python.org/2/library/logging.html#logrecord-attributes -# -log_date_format: using python strftime formating directives -# refer to https://docs.python.org/2/library/time.html#time.strftime -#log_format = %(asctime)s %(levelname)-8s %(process)d %(message)s -#log_date_format = %Y-%m-%d %H:%M:%S -#log_privacy = False \ No newline at end of file diff --git a/config/docker/jasmin/jasmin/config/interceptor.cfg b/config/docker/jasmin/jasmin/config/interceptor.cfg deleted file mode 100644 index 43d2c97..0000000 --- a/config/docker/jasmin/jasmin/config/interceptor.cfg +++ /dev/null @@ -1,57 +0,0 @@ -# -# This is the Jasmin interceptor configuration file. -# For any modifications to this file, refer to Jasmin Documentation. -# If that does not help, post your question on Jasmin's web forum -# hosted at Google Groups: https://groups.google.com/group/jasmin-sms-gateway -# -# Do NOT simply read the instructions in here without understanding -# what they do. They're here only as hints or reminders. If you are unsure -# consult the online docs. - -[interceptor] -# If you want you can bind a single interface, you can specify its IP here -#bind = 0.0.0.0 - -# Accept connections on the specified port, default is 8987 -#port = 8987 - -# If authentication is True, access will require entering a username and password -# as defined in admin_username and admin_password, you can disable this security -# layer by setting authentication to False, in this case admin_* values are ignored. -#authentication = True -#admin_username = iadmin -# This is a MD5 password digest hex encoded -#admin_password = dd8b84cdb60655fed3b9b2d668c5bd9e - -# Specify the server verbosity level. 
-# This can be one of: -# NOTSET (disable logging) -# DEBUG (a lot of information, useful for development/testing) -# INFO (moderately verbose, what you want in production probably) -# WARNING (only very important / critical messages and errors are logged) -# ERROR (only errors / critical messages are logged) -# CRITICAL (only critical messages are logged) -#log_level = INFO - -# Specify the log file path -#log_file = /var/log/jasmin/interceptor.log - -# When to rotate the log file, possible values: -# S: Seconds -# M: Minutes -# H: Hours -# D: Days -# W0-W6: Weekday (0=Monday) -# midnight: Roll over at midnight -#log_rotate = W6 - -# The following directives define logging patterns including: -# - log_format: using python logging's attributes -# refer to https://docs.python.org/2/library/logging.html#logrecord-attributes -# -log_date_format: using python strftime formating directives -# refer to https://docs.python.org/2/library/time.html#time.strftime -#log_format = %(asctime)s %(levelname)-8s %(process)d %(message)s -#log_date_format = %Y-%m-%d %H:%M:%S - -# This is a duration threshold (seconds) for logging slow scripts. -#log_slow_script = 1 diff --git a/config/docker/jasmin/jasmin/config/jasmin.cfg b/config/docker/jasmin/jasmin/config/jasmin.cfg deleted file mode 100644 index 1f06a6f..0000000 --- a/config/docker/jasmin/jasmin/config/jasmin.cfg +++ /dev/null @@ -1,662 +0,0 @@ -# -# This is the main Jasmin SMS gateway configuration file. -# For any modifications to this file, refer to Jasmin Documentation. -# If that does not help, post your question on Jasmin's web forum -# hosted at Google Groups: https://groups.google.com/group/jasmin-sms-gateway -# -# Do NOT simply read the instructions in here without understanding -# what they do. They're here only as hints or reminders. If you are unsure -# consult the online docs. - -[smpp-server] - -# SMPP Server identifier -#id = "smpps_01" - -# If you want you can bind a single interface, you can specify its IP here -#bind = 0.0.0.0 - -# Accept connections on the specified port, default is 2775 -#port = 2775 - -# Activate billing feature -# May be disabled if not needed/used -#billing_feature = True - -# Timeout for response to bind request -#sessionInitTimerSecs = 30 - -# Enquire link interval -#enquireLinkTimerSecs = 30 - -# Maximum time lapse allowed between transactions, after which, -# the connection is considered as inactive -#inactivityTimerSecs = 300 - -# Timeout for responses to any request PDU -#responseTimerSecs = 60 - -# Timeout for reading a single PDU, this is the maximum lapse of time between -# receiving PDU's header and its complete read, if the PDU reading timed out, -# the connection is considered as 'corrupt' and will reconnect -#pduReadTimerSecs = 10 - -# When message is routed to a SMPP Client connecter: How much time it is kept in -# redis waiting for receipt -#dlr_expiry = 86400 - -# Specify the server verbosity level. 
-# This can be one of: -# NOTSET (disable logging) -# DEBUG (a lot of information, useful for development/testing) -# INFO (moderately verbose, what you want in production probably) -# WARNING (only very important / critical messages and errors are logged) -# ERROR (only errors / critical messages are logged) -# CRITICAL (only critical messages are logged) -#log_level = INFO - -# Specify the log file path -#log_file = /var/log/jasmin/default-smpps_01.log - -# When to rotate the log file, possible values: -# S: Seconds -# M: Minutes -# H: Hours -# D: Days -# W0-W6: Weekday (0=Monday) -# midnight: Roll over at midnight -#log_rotate = midnight - -# The following directives define logging patterns including: -# - log_format: using python logging's attributes -# refer to https://docs.python.org/2/library/logging.html#logrecord-attributes -# -log_date_format: using python strftime formating directives -# refer to https://docs.python.org/2/library/time.html#time.strftime -#log_format = %(asctime)s %(levelname)-8s %(process)d %(message)s -#log_date_format = %Y-%m-%d %H:%M:%S -#log_privacy = False - -[smpp-server-pb] -# If you want you can bind a single interface, you can specify its IP here -#bind = 0.0.0.0 - -# Accept connections on the specified port, default is 14000 -#port = 14000 - -# If authentication is True, access will require entering a username and password -# as defined in admin_username and admin_password, you can disable this security -# layer by setting authentication to False, in this case admin_* values are ignored. -#authentication = True -#admin_username = smppsadmin -# This is a MD5 password digest hex encoded -#admin_password = e97ab122faa16beea8682d84f3d2eea4 - -# Specify the server verbosity level. -# This can be one of: -# NOTSET (disable logging) -# DEBUG (a lot of information, useful for development/testing) -# INFO (moderately verbose, what you want in production probably) -# WARNING (only very important / critical messages and errors are logged) -# ERROR (only errors / critical messages are logged) -# CRITICAL (only critical messages are logged) -#log_level = INFO - -# Specify the log file path -#log_file = /var/log/jasmin/smpp-server-pb.log - -# When to rotate the log file, possible values: -# S: Seconds -# M: Minutes -# H: Hours -# D: Days -# W0-W6: Weekday (0=Monday) -# midnight: Roll over at midnight -#log_rotate = W6 - -# The following directives define logging patterns including: -# - log_format: using python logging's attributes -# refer to https://docs.python.org/2/library/logging.html#logrecord-attributes -# -log_date_format: using python strftime formating directives -# refer to https://docs.python.org/2/library/time.html#time.strftime -#log_format = %(asctime)s %(levelname)-8s %(process)d %(message)s -#log_date_format = %Y-%m-%d %H:%M:%S - -[client-management] -# Jasmin persists its configuration profiles in /etc/jasmin/store by -# default. You can specify a custom location here -#store_path = /etc/jasmin/store - -# If you want you can bind a single interface, you can specify its IP here -#bind = 0.0.0.0 - -# Accept connections on the specified port, default is 8989 -#port = 8989 - -# If authentication is True, access will require entering a username and password -# as defined in admin_username and admin_password, you can disable this security -# layer by setting authentication to False, in this case admin_* values are ignored. 
-#authentication = True -#admin_username = cmadmin -# This is a MD5 password digest hex encoded -#admin_password = e1c5136acafb7016bc965597c992eb82 - -# Specify the server verbosity level. -# This can be one of: -# NOTSET (disable logging) -# DEBUG (a lot of information, useful for development/testing) -# INFO (moderately verbose, what you want in production probably) -# WARNING (only very important / critical messages and errors are logged) -# ERROR (only errors / critical messages are logged) -# CRITICAL (only critical messages are logged) -#log_level = INFO - -# Specify the log file path -#log_file = /var/log/jasmin/smppclient-manager.log - -# When to rotate the log file, possible values: -# S: Seconds -# M: Minutes -# H: Hours -# D: Days -# W0-W6: Weekday (0=Monday) -# midnight: Roll over at midnight -#log_rotate = W6 - -# The following directives define logging patterns including: -# - log_format: using python logging's attributes -# refer to https://docs.python.org/2/library/logging.html#logrecord-attributes -# -log_date_format: using python strftime formating directives -# refer to https://docs.python.org/2/library/time.html#time.strftime -#log_format = %(asctime)s %(levelname)-8s %(process)d %(message)s -#log_date_format = %Y-%m-%d %H:%M:%S - -# The protocol version used to pickle objects before transfering -# them to client side, this is used in the client manager only, -# the pickle protocol defined in SMPPClientManagerPBProxy is set -# to 2 and is not configurable -#pickle_protocol = 2 - -[service-smppclient] -# For each smppclient connector a service is associated -# refer to "Message flows" documentation for more details - -# Specify the server verbosity level. -# This can be one of: -# NOTSET (disable logging) -# DEBUG (a lot of information, useful for development/testing) -# INFO (moderately verbose, what you want in production probably) -# WARNING (only very important / critical messages and errors are logged) -# ERROR (only errors / critical messages are logged) -# CRITICAL (only critical messages are logged) -#log_level = INFO - -# Specify the log file path -#log_file = /var/log/jasmin/service-smppclients.log - -# When to rotate the log file, possible values: -# S: Seconds -# M: Minutes -# H: Hours -# D: Days -# W0-W6: Weekday (0=Monday) -# midnight: Roll over at midnight -#log_rotate = W6 - -# The following directives define logging patterns including: -# - log_format: using python logging's attributes -# refer to https://docs.python.org/2/library/logging.html#logrecord-attributes -# -log_date_format: using python strftime formating directives -# refer to https://docs.python.org/2/library/time.html#time.strftime -#log_format = %(asctime)s %(levelname)-8s %(process)d %(message)s -#log_date_format = %Y-%m-%d %H:%M:%S - -[sm-listener] -# SM listener consumes submit_sm and deliver_sm messages from amqp broker -# refer to "Message flows" documentation for more details - -# If publish_submit_sm_resp is True, any received SubmitSm PDU will be published -# to the 'messaging' exchange on 'submit.sm.resp.CID' route, useful when you have -# a third party application waiting for these messages. -#publish_submit_sm_resp = False -publish_submit_sm_resp = True - -# If the error is defined in submit_error_retrial, Jasmin will retry sending submit_sm if it -# gets one of these errors. -# submit_sm retrial will be executed 'count' times and delayed for 'delay' seconds each time. 
-#submit_error_retrial = { -# 'ESME_RSYSERR': {'count': 2, 'delay': 30}, -# 'ESME_RTHROTTLED': {'count': 20, 'delay': 30}, -# 'ESME_RMSGQFUL': {'count': 2, 'delay': 180}, -# 'ESME_RINVSCHED': {'count': 2, 'delay': 300}, -# } - -# The maximum number of seconds a message can stay in queue waiting for SMPPC to get ready for -# delivey (connected and bound). -#submit_max_age_smppc_not_ready = 1200 - -# Delay (seconds) when retrying a submit with a not-yet ready SMPPc -# Hint: for large scale messaging deployment, it is advised to set this value to few seconds -# in order to keep Jasmin free. -#submit_retrial_delay_smppc_not_ready = 30 - -# Specify the server verbosity level. -# This can be one of: -# NOTSET (disable logging) -# DEBUG (a lot of information, useful for development/testing) -# INFO (moderately verbose, what you want in production probably) -# WARNING (only very important / critical messages and errors are logged) -# ERROR (only errors / critical messages are logged) -# CRITICAL (only critical messages are logged) -#log_level = INFO - -# Specify the log file path -#log_file = /var/log/jasmin/messages.log - -# When to rotate the log file, possible values: -# S: Seconds -# M: Minutes -# H: Hours -# D: Days -# W0-W6: Weekday (0=Monday) -# midnight: Roll over at midnight -#log_rotate = midnight - -# The following directives define logging patterns including: -# - log_format: using python logging's attributes -# refer to https://docs.python.org/2/library/logging.html#logrecord-attributes -# -log_date_format: using python strftime formating directives -# refer to https://docs.python.org/2/library/time.html#time.strftime -#log_format = %(asctime)s %(levelname)-8s %(process)d %(message)s -#log_date_format = %Y-%m-%d %H:%M:%S -#log_privacy = False - -[dlr] -# DLRLookup process id -#pid = main - -# DLRLookup mechanism configuration -#dlr_lookup_retry_delay = 10 -#dlr_lookup_max_retries = 2 - -# If smpp_receipt_on_success_submit_sm_resp is True, every connected user to smpp server will -# receive a receipt (data_sm or deliver_sm) whenever a submit_sm_resp is received -# for a message he sent and requested receipt for it. -#smpp_receipt_on_success_submit_sm_resp = False - -# Specify the server verbosity level. -# This can be one of: -# NOTSET (disable logging) -# DEBUG (a lot of information, useful for development/testing) -# INFO (moderately verbose, what you want in production probably) -# WARNING (only very important / critical messages and errors are logged) -# ERROR (only errors / critical messages are logged) -# CRITICAL (only critical messages are logged) -#log_level = INFO - -# Specify the log file path -#log_file = /var/log/jasmin/messages.log - -# When to rotate the log file, possible values: -# S: Seconds -# M: Minutes -# H: Hours -# D: Days -# W0-W6: Weekday (0=Monday) -# midnight: Roll over at midnight -#log_rotate = midnight - -# The following directives define logging patterns including: -# - log_format: using python logging's attributes -# refer to https://docs.python.org/2/library/logging.html#logrecord-attributes -# -log_date_format: using python strftime formating directives -# refer to https://docs.python.org/2/library/time.html#time.strftime -#log_format = %(asctime)s %(levelname)-8s %(process)d %(message)s -#log_date_format = %Y-%m-%d %H:%M:%S -#log_privacy = False - -[amqp-broker] -# The following directives define the way how Jasmin is connecting to the AMQP Broker, -# default values must work with a freshly installed RabbitMQ server. 
-host = rabbit-mq -vhost = / -spec = /etc/jasmin/resource/amqp0-9-1.xml -port = 5672 -username = guest -password = guest -heartbeat = 0 -#host = 127.0.0.1 -#vhost = / -#spec = /etc/jasmin/resource/amqp0-9-1.xml -#port = 5672 -#username = guest -#password = guest -#heartbeat = 0 - -# Specify the server verbosity level. -# This can be one of: -# NOTSET (disable logging) -# DEBUG (a lot of information, useful for development/testing) -# INFO (moderately verbose, what you want in production probably) -# WARNING (only very important / critical messages and errors are logged) -# ERROR (only errors / critical messages are logged) -# CRITICAL (only critical messages are logged) -#log_level = INFO - -# Specify the log file path -#log_file = /var/log/jasmin/amqp-client.log - -# When to rotate the log file, possible values: -# S: Seconds -# M: Minutes -# H: Hours -# D: Days -# W0-W6: Weekday (0=Monday) -# midnight: Roll over at midnight -#log_rotate = W6 - -# The following directives define logging patterns including: -# - log_format: using python logging's attributes -# refer to https://docs.python.org/2/library/logging.html#logrecord-attributes -# -log_date_format: using python strftime formating directives -# refer to https://docs.python.org/2/library/time.html#time.strftime -#log_format = %(asctime)s %(levelname)-8s %(process)d %(message)s -#log_date_format = %Y-%m-%d %H:%M:%S - -#connection_loss_retry = True -#connection_failure_retry = True -#connection_loss_retry_delay = 10 -#connection_loss_failure_delay = 10 - -[http-api] -# If you want you can bind a single interface, you can specify its IP here -#bind = 0.0.0.0 - -# Accept connections on the specified port, default is 1401 -#port = 1401 - -# Activate billing feature -# May be disabled if not needed/used -#billing_feature = True - -# How many message parts you can get for a long message, default is 5 so you -# can't exceed 800 characters (160x5) when sending a long latin message. -#long_content_max_parts = 5 - -# Splitting long content can be made through SAR options or UDH -# Possible values are: sar and udh -#long_content_split = udh - -# Specify the access log file path -#access_log = /var/log/jasmin/http-access.log - -# Specify the server verbosity level. -# This can be one of: -# NOTSET (disable logging) -# DEBUG (a lot of information, useful for development/testing) -# INFO (moderately verbose, what you want in production probably) -# WARNING (only very important / critical messages and errors are logged) -# ERROR (only errors / critical messages are logged) -# CRITICAL (only critical messages are logged) -#log_level = INFO - -# Specify the log file path -#log_file = /var/log/jasmin/http-api.log - -# When to rotate the log file, possible values: -# S: Seconds -# M: Minutes -# H: Hours -# D: Days -# W0-W6: Weekday (0=Monday) -# midnight: Roll over at midnight -#log_rotate = W6 - -# The following directives define logging patterns including: -# - log_format: using python logging's attributes -# refer to https://docs.python.org/2/library/logging.html#logrecord-attributes -# -log_date_format: using python strftime formating directives -# refer to https://docs.python.org/2/library/time.html#time.strftime -#log_format = %(asctime)s %(levelname)-8s %(process)d %(message)s -#log_date_format = %Y-%m-%d %H:%M:%S -#log_privacy = False - -[router] -# Jasmin router persists its routing configuration profiles in /etc/jasmin/store by -# default. 
-[router]
-# Jasmin router persists its routing configuration profiles in /etc/jasmin/store by
-# default. You can specify a custom location here
-#store_path = /etc/jasmin/store
-
-# Router will automatically persist users and groups to disk whenever critical information
-# is updated (ex: user balance); persistence is executed every persistence_timer_secs
-#persistence_timer_secs = 60
-
-# If you want to bind a single interface, you can specify its IP here
-#bind = 0.0.0.0
-
-# Accept connections on the specified port, default is 8988
-#port = 8988
-
-# If authentication is True, access will require entering a username and password
-# as defined in admin_username and admin_password; you can disable this security
-# layer by setting authentication to False, in which case the admin_* values are ignored.
-#authentication = True
-#admin_username = radmin
-# This is an MD5 password digest, hex encoded
-#admin_password = 82a606ca5a0deea2b5777756788af5c8
-
-# Specify the server verbosity level.
-# This can be one of:
-# NOTSET (disable logging)
-# DEBUG (a lot of information, useful for development/testing)
-# INFO (moderately verbose, what you want in production probably)
-# WARNING (only very important / critical messages and errors are logged)
-# ERROR (only errors / critical messages are logged)
-# CRITICAL (only critical messages are logged)
-#log_level = INFO
-
-# Specify the log file path
-#log_file = /var/log/jasmin/router.log
-
-# When to rotate the log file, possible values:
-# S: Seconds
-# M: Minutes
-# H: Hours
-# D: Days
-# W0-W6: Weekday (0=Monday)
-# midnight: Roll over at midnight
-#log_rotate = W6
-
-# The following directives define logging patterns including:
-# - log_format: using python logging's attributes
-#   refer to https://docs.python.org/2/library/logging.html#logrecord-attributes
-# - log_date_format: using python strftime formatting directives
-#   refer to https://docs.python.org/2/library/time.html#time.strftime
-#log_format = %(asctime)s %(levelname)-8s %(process)d %(message)s
-#log_date_format = %Y-%m-%d %H:%M:%S
-
-# The protocol version used to pickle objects before transferring
-# them to the client side; this is used in the client manager only,
-# the pickle protocol defined in SMPPClientManagerPBProxy is set
-# to 2 and is not configurable
-#pickle_protocol = 2
-
-[deliversm-thrower]
-# The following directives define the process of delivering SMS-MO over http to a third party
-# application; it is explained in the "HTTP API" documentation
-# Sets socket timeout in seconds for outgoing client http connections.
-#http_timeout = 30
-# Define how many seconds should pass within the queuing system before retrying a failed throw.
-#retry_delay = 30
-# Define how many retries should be performed for failing throws of SMS-MO.
-#max_retries = 3
-
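For the deliversm-thrower directives above: the third-party side is just an HTTP endpoint. A minimal receiver sketch, assuming Flask for brevity and the documented convention that the response body must contain "ACK/Jasmin" for the throw to be acknowledged (worth verifying against your Jasmin version):

from flask import Flask, request

app = Flask(__name__)

@app.route("/mo", methods=["GET", "POST"])
def receive_mo():
    # Jasmin passes the message as request parameters (id, from, to, content, ...).
    p = request.values
    print("MO from %s to %s: %s" % (p.get("from"), p.get("to"), p.get("content")))
    return "ACK/Jasmin"  # anything else counts as a failed throw and is retried

if __name__ == "__main__":
    app.run(port=8080)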
-# Specify the server verbosity level.
-# This can be one of:
-# NOTSET (disable logging)
-# DEBUG (a lot of information, useful for development/testing)
-# INFO (moderately verbose, what you want in production probably)
-# WARNING (only very important / critical messages and errors are logged)
-# ERROR (only errors / critical messages are logged)
-# CRITICAL (only critical messages are logged)
-#log_level = INFO
-
-# Specify the log file path
-#log_file = /var/log/jasmin/deliversm-thrower.log
-
-# When to rotate the log file, possible values:
-# S: Seconds
-# M: Minutes
-# H: Hours
-# D: Days
-# W0-W6: Weekday (0=Monday)
-# midnight: Roll over at midnight
-#log_rotate = W6
-
-# The following directives define logging patterns including:
-# - log_format: using python logging's attributes
-#   refer to https://docs.python.org/2/library/logging.html#logrecord-attributes
-# - log_date_format: using python strftime formatting directives
-#   refer to https://docs.python.org/2/library/time.html#time.strftime
-#log_format = %(asctime)s %(levelname)-8s %(process)d %(message)s
-#log_date_format = %Y-%m-%d %H:%M:%S
-
-[dlr-thrower]
-# The following directives define the process of delivering delivery receipts over http to a third party
-# application; it is explained in the "HTTP API" documentation
-# Sets socket timeout in seconds for outgoing client http connections.
-#http_timeout = 30
-# Define how many seconds should pass within the queuing system before retrying a failed throw.
-#retry_delay = 30
-# Define how many retries should be performed for failing throws of DLR.
-#max_retries = 3
-
-# Specify the pdu type to consider when throwing a receipt through SMPPs, possible values:
-# - data_sm
-# - deliver_sm (default pdu)
-#dlr_pdu = deliver_sm
-
-# Specify the server verbosity level.
-# This can be one of:
-# NOTSET (disable logging)
-# DEBUG (a lot of information, useful for development/testing)
-# INFO (moderately verbose, what you want in production probably)
-# WARNING (only very important / critical messages and errors are logged)
-# ERROR (only errors / critical messages are logged)
-# CRITICAL (only critical messages are logged)
-#log_level = INFO
-
-# Specify the log file path
-#log_file = /var/log/jasmin/dlr-thrower.log
-
-# When to rotate the log file, possible values:
-# S: Seconds
-# M: Minutes
-# H: Hours
-# D: Days
-# W0-W6: Weekday (0=Monday)
-# midnight: Roll over at midnight
-#log_rotate = W6
-
-# The following directives define logging patterns including:
-# - log_format: using python logging's attributes
-#   refer to https://docs.python.org/2/library/logging.html#logrecord-attributes
-# - log_date_format: using python strftime formatting directives
-#   refer to https://docs.python.org/2/library/time.html#time.strftime
-#log_format = %(asctime)s %(levelname)-8s %(process)d %(message)s
-#log_date_format = %Y-%m-%d %H:%M:%S
-
-[redis-client]
-# The following directives define how Jasmin connects to the redis server;
-# the default values should work with a freshly installed redis server.
-host = redis
-port = 6379
-dbid = 0
-password = None
-poolsize = 10
-#host = 127.0.0.1
-#port = 6379
-#dbid = 0
-#password = None
-#poolsize = 10
-
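To sanity-check the [redis-client] settings above from Python, a minimal sketch using redis-py as a stand-in (Jasmin itself uses txredisapi); the hostname redis resolves on the compose network:

import redis  # redis-py as a stand-in; Jasmin itself uses txredisapi

r = redis.Redis(host="redis", port=6379, db=0, password=None)
r.ping()  # raises ConnectionError if the server is unreachable
print("keys in db 0:", r.dbsize())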
-# Specify the server verbosity level.
-# This can be one of:
-# NOTSET (disable logging)
-# DEBUG (a lot of information, useful for development/testing)
-# INFO (moderately verbose, what you want in production probably)
-# WARNING (only very important / critical messages and errors are logged)
-# ERROR (only errors / critical messages are logged)
-# CRITICAL (only critical messages are logged)
-#log_level = INFO
-
-# Specify the log file path
-#log_file = /var/log/jasmin/redis-client.log
-
-# When to rotate the log file, possible values:
-# S: Seconds
-# M: Minutes
-# H: Hours
-# D: Days
-# W0-W6: Weekday (0=Monday)
-# midnight: Roll over at midnight
-#log_rotate = W6
-
-# The following directives define logging patterns including:
-# - log_format: using python logging's attributes
-#   refer to https://docs.python.org/2/library/logging.html#logrecord-attributes
-# - log_date_format: using python strftime formatting directives
-#   refer to https://docs.python.org/2/library/time.html#time.strftime
-#log_format = %(asctime)s %(levelname)-8s %(process)d %(message)s
-#log_date_format = %Y-%m-%d %H:%M:%S
-
-[jcli]
-# If you want to bind a single interface, you can specify its IP here
-#bind = 127.0.0.1
-
-# Accept connections on the specified port, default is 8990
-#port = 8990
-
-# If authentication is True, access will require entering a username and password
-# as defined in admin_username and admin_password; you can disable this security
-# layer by setting authentication to False, in which case the admin_* values are ignored.
-#authentication = True
-#admin_username = jcliadmin
-# This is an MD5 password digest, hex encoded
-#admin_password = 79e9b0aa3f3e7c53e916f7ac47439bcb
-
-# Specify the server verbosity level.
-# This can be one of:
-# NOTSET (disable logging)
-# DEBUG (a lot of information, useful for development/testing)
-# INFO (moderately verbose, what you want in production probably)
-# WARNING (only very important / critical messages and errors are logged)
-# ERROR (only errors / critical messages are logged)
-# CRITICAL (only critical messages are logged)
-#log_level = INFO
-
-# Specify the log file path
-#log_file = /var/log/jasmin/jcli.log
-
-# When to rotate the log file, possible values:
-# S: Seconds
-# M: Minutes
-# H: Hours
-# D: Days
-# W0-W6: Weekday (0=Monday)
-# midnight: Roll over at midnight
-#log_rotate = W6
-
-# The following directives define logging patterns including:
-# - log_format: using python logging's attributes
-#   refer to https://docs.python.org/2/library/logging.html#logrecord-attributes
-# - log_date_format: using python strftime formatting directives
-#   refer to https://docs.python.org/2/library/time.html#time.strftime
-#log_format = %(asctime)s %(levelname)-8s %(process)d %(message)s
-#log_date_format = %Y-%m-%d %H:%M:%S
-
-[interceptor-client]
-# The following directives define the client connector to InterceptorPB; it's used when jasmind
-# is started with --enable-interceptor-client
-#host = 127.0.0.1
-#port = 8987
-#username = iadmin
-#password = ipwd
diff --git a/config/docker/jasmin/jasmin/logs/.gitignore b/config/docker/jasmin/jasmin/logs/.gitignore
deleted file mode 100644
index bf0824e..0000000
--- a/config/docker/jasmin/jasmin/logs/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*.log
\ No newline at end of file
diff --git a/config/docker/jasmin/jasmin/resource/amqp0-8.stripped.rabbitmq.xml b/config/docker/jasmin/jasmin/resource/amqp0-8.stripped.rabbitmq.xml
deleted file mode 100644
index d1fd2c0..0000000
--- a/config/docker/jasmin/jasmin/resource/amqp0-8.stripped.rabbitmq.xml
+++ /dev/null
@@ -1,771 +0,0 @@
-[771 lines elided: vendored AMQP 0-8 (RabbitMQ-stripped) protocol specification XML; the markup was lost in extraction, so the body is omitted here]
diff --git a/config/docker/jasmin/jasmin/resource/amqp0-9-1.xml b/config/docker/jasmin/jasmin/resource/amqp0-9-1.xml
deleted file mode 100644
index da785eb..0000000
--- a/config/docker/jasmin/jasmin/resource/amqp0-9-1.xml
+++ /dev/null
@@ -1,2843 +0,0 @@
-[2843 lines elided: vendored AMQP 0-9-1 protocol specification XML (reply-code constants, domains, and the connection, channel, exchange, queue, basic and tx classes); the markup was lost in extraction, so the body is omitted here]
- - - - - tx = C:SELECT S:SELECT-OK - / C:COMMIT S:COMMIT-OK - / C:ROLLBACK S:ROLLBACK-OK - - - - - - - - - - This method sets the channel to use standard transactions. The client must use this - method at least once on a channel before using the Commit or Rollback methods. - - - - - - - - This method confirms to the client that the channel was successfully set to use - standard transactions. - - - - - - - - - This method commits all message publications and acknowledgments performed in - the current transaction. A new transaction starts immediately after a commit. - - - - - - - The client MUST NOT use the Commit method on non-transacted channels. - - - The client opens a channel and then uses Tx.Commit. - - - - - - - This method confirms to the client that the commit succeeded. Note that if a commit - fails, the server raises a channel exception. - - - - - - - - - This method abandons all message publications and acknowledgments performed in - the current transaction. A new transaction starts immediately after a rollback. - Note that unacked messages will not be automatically redelivered by rollback; - if that is required an explicit recover call should be issued. - - - - - - - The client MUST NOT use the Rollback method on non-transacted channels. - - - The client opens a channel and then uses Tx.Rollback. - - - - - - - This method confirms to the client that the rollback succeeded. Note that if an - rollback fails, the server raises a channel exception. - - - - - - diff --git a/config/docker/jasmin/jasmin/store/.gitignore b/config/docker/jasmin/jasmin/store/.gitignore deleted file mode 100644 index e69de29..0000000 diff --git a/config/docker/jasmin/redis/.gitignore b/config/docker/jasmin/redis/.gitignore deleted file mode 100644 index 4ef4b7b..0000000 --- a/config/docker/jasmin/redis/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.rdb \ No newline at end of file diff --git a/config/docker/sms_logger/Dockerfile b/config/docker/sms_logger/Dockerfile deleted file mode 100644 index b8598bc..0000000 --- a/config/docker/sms_logger/Dockerfile +++ /dev/null @@ -1,50 +0,0 @@ -FROM python:3.11-slim - -# disable debian interactive -ARG DEBIAN_FRONTEND=noninteractive -# suppress pip upgrade warning -ARG PIP_DISABLE_PIP_VERSION_CHECK=1 -# disable cache directory, image size 2.1GB to 1.9GB -ARG PIP_NO_CACHE_DIR=1 - -RUN apt-get update && apt-get -y upgrade - -RUN apt-get install --no-install-recommends -y \ - python3-dev python3-wheel python3-setuptools virtualenv \ - build-essential gcc curl \ - libpq-dev libpq5 telnet - -RUN apt-get clean autoclean && \ - apt-get autoremove -y && \ - rm -rf /var/lib/{apt,dpkg,cache,log}/ - -# -------------------------------------- -ENV APP_DIR=/app -ENV APP_USER=app -ENV PYTHONDONTWRITEBYTECODE=1 -ENV PYTHONBUFFERED=1 - -RUN useradd -m -d ${APP_DIR} -U -r -s /bin/bash ${APP_USER} - -USER ${APP_USER} - -WORKDIR ${APP_DIR} - -# Create the virtual environment -RUN python -m venv /app/env -# Activate the virtual environment -ENV PATH="$APP_DIR/env/bin:$PATH" - -COPY config/docker/sms_logger/requirements.txt requirements.txt - -RUN pip install -U pip wheel - -RUN pip install -r requirements.txt - -COPY --chown=$APP_USER config/docker/sms_logger/*.py . 
-COPY --chown=$APP_USER config/docker/sms_logger/docker-entrypoint.sh docker-entrypoint.sh - -RUN mkdir -p $APP_DIR/resource - -ENTRYPOINT ["bash", "docker-entrypoint.sh"] - diff --git a/config/docker/sms_logger/docker-entrypoint.sh b/config/docker/sms_logger/docker-entrypoint.sh deleted file mode 100755 index 493e488..0000000 --- a/config/docker/sms_logger/docker-entrypoint.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -APP_DIR=${APP_DIR:-'/app'} -APP_LOG_LEVEL=${APP_LOG_LEVEL:-'warn'} - -# shellcheck disable=SC2164 -cd "$APP_DIR" - -source "$APP_DIR"/env/bin/activate - -"$APP_DIR"/env/bin/python sms_logger.py \ No newline at end of file diff --git a/config/docker/sms_logger/requirements.txt b/config/docker/sms_logger/requirements.txt deleted file mode 100644 index 9eed332..0000000 --- a/config/docker/sms_logger/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -Twisted~=22.1.0 -txAMQP3~=0.9.3 -smpp.pdu3~=0.6 -smpp.twisted3~=0.7 -service_identity~=18.1.0 -python-dotenv -jasmin -psycopg2 -mysql-connector-python diff --git a/config/docker/sms_logger/sms_logger.py b/config/docker/sms_logger/sms_logger.py deleted file mode 100644 index 356f1a7..0000000 --- a/config/docker/sms_logger/sms_logger.py +++ /dev/null @@ -1,434 +0,0 @@ -#!/usr/bin/env python -"""This script will log all sent sms through Jasmin with user information. - -Requirement: -- Activate publish_submit_sm_resp in jasmin.cfg -- Install psycopg2: # Used for PostgreSQL connection - + pip install psycopg2 -- Install mysql.connector: # Used for MySQL connection - + pip install mysql-connector-python - -Optional: -- SET ENVIRONMENT ENV: - + DB_TYPE_MYSQL # Default: 1 # 1 for MySQL, 0 for PostgreSQL - + DB_HOST # Default: 127.0.0.1 # IP or Docker container name - + DB_DATABASE # Default: jasmin # should Exist - + DB_TABLE # Default: submit_log # the script will create it if it doesn't Exist - + DB_USER # Default: jasmin # for the Database connection. - + DB_PASS # Default: jadmin # for the Database connection - + AMQP_BROKER_HOST # Default: 127.0.0.1 # RabbitMQ host used by Jasmin SMS Gateway. IP or Docker container name - + AMQP_BROKER_PORT # Default: 5672 # RabbitMQ port used by Jasmin SMS Gateway. 
IP or Docker container name - -Database Scheme: -- MySQL table: - CREATE TABLE ${DB_TABLE} ( - `msgid` VARCHAR(45) PRIMARY KEY, - `source_connector` VARCHAR(15), - `routed_cid` VARCHAR(30), - `source_addr` VARCHAR(40), - `destination_addr` VARCHAR(40) NOT NULL CHECK (`destination_addr` <> ''), - `rate` DECIMAL(12, 7), - `charge` DECIMAL(12, 7), - `pdu_count` TINYINT(3) DEFAULT 1, - `short_message` BLOB, - `binary_message` BLOB, - `status` VARCHAR(15) NOT NULL CHECK (`status` <> ''), - `uid` VARCHAR(15) NOT NULL CHECK (`uid` <> ''), - `trials` TINYINT(4) DEFAULT 1, - `created_at` DATETIME NOT NULL, - `status_at` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, - INDEX (`source_connector`), - INDEX (`routed_cid`), - INDEX (`source_addr`), - INDEX (`destination_addr`), - INDEX (`status`), - INDEX (`uid`), - INDEX (`created_at`), - INDEX (`created_at`, `uid`), - INDEX (`created_at`, `uid`, `status`), - INDEX (`created_at`, `routed_cid`), - INDEX (`created_at`, `routed_cid`, `status`), - INDEX (`created_at`, `source_connector`), - INDEX (`created_at`, `source_connector`, `status`) - ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci; -- PostgreSQL table: - CREATE TABLE IF NOT EXISTS ${DB_TABLE} ( - msgid VARCHAR(45) NOT NULL PRIMARY KEY, - source_connector VARCHAR(15) NULL DEFAULT NULL, - routed_cid VARCHAR(30) NULL DEFAULT NULL, - source_addr VARCHAR(40) NULL DEFAULT NULL, - destination_addr VARCHAR(40) NOT NULL CHECK (destination_addr <> ''), - rate DECIMAL(12,7) NULL DEFAULT NULL, - charge DECIMAL(12,7) NULL DEFAULT NULL, - pdu_count SMALLINT NULL DEFAULT '1', - short_message BYTEA NULL DEFAULT NULL, - binary_message BYTEA NULL DEFAULT NULL, - status VARCHAR(15) NOT NULL CHECK (status <> ''), - uid VARCHAR(15) NOT NULL CHECK (uid <> ''), - trials SMALLINT NULL DEFAULT '1', - created_at TIMESTAMP(0) NOT NULL, - status_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP - ); - CREATE INDEX ON ${DB_TABLE} (source_connector); - CREATE INDEX ON ${DB_TABLE} (routed_cid); - CREATE INDEX ON ${DB_TABLE} (source_addr); - CREATE INDEX ON ${DB_TABLE} (destination_addr); - CREATE INDEX ON ${DB_TABLE} (status); - CREATE INDEX ON ${DB_TABLE} (uid); - CREATE INDEX ON ${DB_TABLE} (created_at); - CREATE INDEX ON ${DB_TABLE} (created_at, uid); - CREATE INDEX ON ${DB_TABLE} (created_at, uid, status); - CREATE INDEX ON ${DB_TABLE} (created_at, routed_cid); - CREATE INDEX ON ${DB_TABLE} (created_at, routed_cid, status); - CREATE INDEX ON ${DB_TABLE} (created_at, source_connector); - CREATE INDEX ON ${DB_TABLE} (created_at, source_connector, status); -""" - -import os -from time import sleep -import pickle as pickle -import binascii -from datetime import datetime -from twisted.internet.defer import inlineCallbacks -from twisted.internet import reactor -from twisted.internet.protocol import ClientCreator -from twisted.python import log -from txamqp.protocol import AMQClient -from txamqp.client import TwistedDelegate -import txamqp.spec - -from smpp.pdu.pdu_types import DataCoding - -from mysql.connector import connect as _mysql_connect -from psycopg2 import pool as _postgres_pool -from psycopg2 import Error as _postgres_error - -q = {} - -# Database connection parameters -db_type_mysql = int(os.getenv('DB_TYPE_MYSQL', '1')) == 1 -db_host = os.getenv('DB_HOST', '127.0.0.1') -db_database = os.getenv('DB_DATABASE', 'jasmin') -db_table = os.getenv('DB_TABLE', 'submit_log') -db_user = os.getenv('DB_USER', 'jasmin') -db_pass = os.getenv('DB_PASS', 'jadmin') -# AMQB broker connection parameters -amqp_broker_host = 
os.getenv('AMQP_BROKER_HOST', '127.0.0.1') -amqp_broker_port = int(os.getenv('AMQP_BROKER_PORT', '5672')) - - -def get_psql_conn(): - psql_pool = _postgres_pool.SimpleConnectionPool( - 1, - 20, - user=db_user, - password=db_pass, - host=db_host, - database=db_database) - return psql_pool.getconn() - - -def get_mysql_conn(): - return _mysql_connect( - user=db_user, - password=db_pass, - host=db_host, - database=db_database, - pool_name="mypool", - pool_size=20) - - -@inlineCallbacks -def gotConnection(conn, username, password): - print("*** Connected to broker, authenticating: %s" % username, flush=True) - yield conn.start({"LOGIN": username, "PASSWORD": password}) - - print("*** Authenticated. Ready to receive messages", flush=True) - chan = yield conn.channel(1) - yield chan.channel_open() - - yield chan.queue_declare(queue="sms_logger_queue") - - # Bind to submit.sm.* and submit.sm.resp.* routes to track sent messages - yield chan.queue_bind(queue="sms_logger_queue", exchange="messaging", routing_key='submit.sm.*') - yield chan.queue_bind(queue="sms_logger_queue", exchange="messaging", routing_key='submit.sm.resp.*') - # Bind to dlr_thrower.* to track DLRs - yield chan.queue_bind(queue="sms_logger_queue", exchange="messaging", routing_key='dlr_thrower.*') - - yield chan.basic_consume(queue='sms_logger_queue', no_ack=False, consumer_tag="sms_logger") - queue = yield conn.queue("sms_logger") - - if db_type_mysql: - db_conn = get_mysql_conn() - if db_conn: - print("*** Pooling 20 connections", flush=True) - print("*** Connected to MySQL", flush=True) - else: - db_conn = get_psql_conn() - if db_conn: - print("*** Pooling 20 connections", flush=True) - print("*** Connected to psql", flush=True) - - cursor = db_conn.cursor() - - if db_type_mysql: - create_table = ("""CREATE TABLE IF NOT EXISTS {} ( - `msgid` VARCHAR(45) PRIMARY KEY, - `source_connector` VARCHAR(15), - `routed_cid` VARCHAR(30), - `source_addr` VARCHAR(40), - `destination_addr` VARCHAR(40) NOT NULL CHECK (`destination_addr` <> ''), - `rate` DECIMAL(12, 7), - `charge` DECIMAL(12, 7), - `pdu_count` TINYINT(3) DEFAULT 1, - `short_message` BLOB, - `binary_message` BLOB, - `status` VARCHAR(15) NOT NULL CHECK (`status` <> ''), - `uid` VARCHAR(15) NOT NULL CHECK (`uid` <> ''), - `trials` TINYINT(4) DEFAULT 1, - `created_at` DATETIME NOT NULL, - `status_at` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, - INDEX (`source_connector`), - INDEX (`routed_cid`), - INDEX (`source_addr`), - INDEX (`destination_addr`), - INDEX (`status`), - INDEX (`uid`), - INDEX (`created_at`), - INDEX (`created_at`, `uid`), - INDEX (`created_at`, `uid`, `status`), - INDEX (`created_at`, `routed_cid`), - INDEX (`created_at`, `routed_cid`, `status`), - INDEX (`created_at`, `source_connector`), - INDEX (`created_at`, `source_connector`, `status`) - ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;""".format(db_table)) - else: - create_table = ("""CREATE TABLE IF NOT EXISTS {} ( - msgid VARCHAR(45) NOT NULL PRIMARY KEY, - source_connector VARCHAR(15) NULL DEFAULT NULL, - routed_cid VARCHAR(30) NULL DEFAULT NULL, - source_addr VARCHAR(40) NULL DEFAULT NULL, - destination_addr VARCHAR(40) NOT NULL CHECK (destination_addr <> ''), - rate DECIMAL(12,7) NULL DEFAULT NULL, - charge DECIMAL(12,7) NULL DEFAULT NULL, - pdu_count SMALLINT NULL DEFAULT '1', - short_message BYTEA NULL DEFAULT NULL, - binary_message BYTEA NULL DEFAULT NULL, - status VARCHAR(15) NOT NULL CHECK (status <> ''), - uid VARCHAR(15) NOT NULL CHECK (uid <> ''), - trials SMALLINT NULL DEFAULT 
'1', - created_at TIMESTAMP(0) NOT NULL, - status_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP - ); - CREATE INDEX ON {} (source_connector); - CREATE INDEX ON {} (routed_cid); - CREATE INDEX ON {} (source_addr); - CREATE INDEX ON {} (destination_addr); - CREATE INDEX ON {} (status); - CREATE INDEX ON {} (uid); - CREATE INDEX ON {} (created_at); - CREATE INDEX ON {} (created_at, uid); - CREATE INDEX ON {} (created_at, uid, status); - CREATE INDEX ON {} (created_at, routed_cid); - CREATE INDEX ON {} (created_at, routed_cid, status); - CREATE INDEX ON {} (created_at, source_connector); - CREATE INDEX ON {} (created_at, source_connector, status); - """.format(db_table, db_table, db_table, - db_table, db_table, db_table, - db_table, db_table, db_table, - db_table, db_table, db_table, - db_table, db_table, )) - - cursor.execute(create_table) - if cursor.rowcount > 0: - print('*** {} table was created successfully'.format(db_table), flush=True) - else: - print('*** {} table already exist'.format(db_table), flush=True) - - db_conn.commit() - - # Wait for messages - # This can be done through a callback ... - while True: - msg = yield queue.get() - props = msg.content.properties - - if db_type_mysql: - db_conn.ping(reconnect=True, attempts=10, delay=1) - else: - check_connection = True - while check_connection: - try: - cursor = db_conn.cursor() - cursor.execute('SELECT 1') - check_connection = False - except _postgres_error: - print('*** PostgreSQL connection exception. Trying to reconnect', flush=True) - db_conn = get_psql_conn() - if db_conn: - print("*** Pooling 20 connections", flush=True) - print("*** Re-connected to psql", flush=True) - cursor = db_conn.cursor() - pass - - if msg.routing_key[:10] == 'submit.sm.' and msg.routing_key[:15] != 'submit.sm.resp.': - pdu = pickle.loads(msg.content.body) - pdu_count = 1 - short_message = pdu.params['short_message'] - billing = props['headers'] - billing_pickle = billing.get('submit_sm_resp_bill') - if not billing_pickle: - billing_pickle = billing.get('submit_sm_bill') - if billing_pickle is not None: - submit_sm_bill = pickle.loads(billing_pickle) - else: - submit_sm_bill = None - source_connector = props['headers']['source_connector'] - routed_cid = msg.routing_key[10:] - - # Is it a multipart message ? 
- while hasattr(pdu, 'nextPdu'): - # Remove UDH from first part - if pdu_count == 1: - short_message = short_message[6:] - - pdu = pdu.nextPdu - - # Update values: - pdu_count += 1 - short_message += pdu.params['short_message'][6:] - - # Save short_message bytes - binary_message = binascii.hexlify(short_message) - - # If it's a binary message, assume it's utf_16_be encoded - if pdu.params['data_coding'] is not None: - dc = pdu.params['data_coding'] - if (isinstance(dc, int) and dc == 8) or (isinstance(dc, DataCoding) and str(dc.schemeData) == 'UCS2'): - short_message = short_message.decode('utf_16_be', 'ignore').encode('utf_8') - - q[props['message-id']] = { - 'source_connector': source_connector, - 'routed_cid': routed_cid, - 'rate': 0, - 'charge': 0, - 'uid': 0, - 'destination_addr': pdu.params['destination_addr'], - 'source_addr': pdu.params['source_addr'], - 'pdu_count': pdu_count, - 'short_message': short_message, - 'binary_message': binary_message, - } - if submit_sm_bill is not None: - q[props['message-id']]['rate'] = submit_sm_bill.getTotalAmounts() - q[props['message-id']]['charge'] = submit_sm_bill.getTotalAmounts() * pdu_count - q[props['message-id']]['uid'] = submit_sm_bill.user.uid - elif msg.routing_key[:15] == 'submit.sm.resp.': - # It's a submit_sm_resp - - pdu = pickle.loads(msg.content.body) - if props['message-id'] not in q: - print('*** Got resp of an unknown submit_sm: %s' % props['message-id'], flush=True) - chan.basic_ack(delivery_tag=msg.delivery_tag) - continue - - qmsg = q[props['message-id']] - - if qmsg['source_addr'] is None: - qmsg['source_addr'] = '' - - insert_log = ("""INSERT INTO {} (msgid, source_addr, rate, pdu_count, charge, - destination_addr, short_message, - status, uid, created_at, binary_message, - routed_cid, source_connector, status_at) - VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) - ON DUPLICATE KEY UPDATE trials = trials + 1;""".format(db_table)) - - cursor.execute(insert_log, ( - props['message-id'], - qmsg['source_addr'], - qmsg['rate'], - qmsg['pdu_count'], - qmsg['charge'], - qmsg['destination_addr'], - qmsg['short_message'], - pdu.status, - qmsg['uid'], - props['headers']['created_at'], - qmsg['binary_message'], - qmsg['routed_cid'], - qmsg['source_connector'], - props['headers']['created_at'],)) - db_conn.commit() - elif msg.routing_key[:12] == 'dlr_thrower.': - if props['headers']['message_status'][:5] == 'ESME_': - # Ignore dlr from submit_sm_resp - chan.basic_ack(delivery_tag=msg.delivery_tag) - continue - - # It's a dlr - if props['message-id'] not in q: - print('*** Got dlr of an unknown submit_sm: %s' % props['message-id'], flush=True) - chan.basic_ack(delivery_tag=msg.delivery_tag) - continue - - # Update message status - qmsg = q[props['message-id']] - update_log = ("UPDATE submit_log SET status = %s, status_at = %s WHERE msgid = %s;".format(db_table)) - cursor.execute(update_log, ( - props['headers']['message_status'], - datetime.now(), - props['message-id'],)) - db_conn.commit() - else: - print('*** unknown route: %s' % msg.routing_key, flush=True) - - chan.basic_ack(delivery_tag=msg.delivery_tag) - - # A clean way to tear down and stop - yield chan.basic_cancel("sms_logger") - yield chan.channel_close() - chan0 = yield conn.channel(0) - yield chan0.connection_close() - - reactor.stop() - - -if __name__ == "__main__": - sleep(2) - print(' ', flush=True) - print(' ', flush=True) - print('***************** sms_logger *****************', flush=True) - if db_type_mysql == 1: - print('*** Staring sms_logger, DB 
drive: MySQL', flush=True) - else: - print('*** Staring sms_logger, DB drive: PostgreSQL', flush=True) - print('**********************************************', flush=True) - - host = amqp_broker_host - port = amqp_broker_port - vhost = '/' - username = 'guest' - password = 'guest' - spec_file = os.environ.get("AMQP_SPEC_FILE", '/etc/jasmin/resource/amqp0-9-1.xml') - - spec = txamqp.spec.load(spec_file) - - # Connect and authenticate - d = ClientCreator(reactor, - AMQClient, - delegate=TwistedDelegate(), - vhost=vhost, - spec=spec).connectTCP(host, port) - d.addCallback(gotConnection, username, password) - - - def whoops(err): - if reactor.running: - log.err(err) - reactor.stop() - - - d.addErrback(whoops) - - reactor.run() diff --git a/docker-compose.yml b/docker-compose.yml index 2f078d8..5bf74f7 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -2,9 +2,9 @@ version: '3.8' services: jasmin-web: - image: tarekaec/jasmin_web_panel:latest + image: tarekaec/jasmin_web_panel:1.3 ports: - - "${JASMIN_WEB_PORT:-8000}:8000" + - "${JASMIN_WEB_PORT:-8999}:8000" deploy: replicas: 1 env_file: @@ -22,8 +22,11 @@ services: depends_on: - redis - db + - rabbit-mq + restart: unless-stopped + jasmin-celery: - image: tarekaec/jasmin_web_panel:latest + image: tarekaec/jasmin_web_panel:1.3 entrypoint: bash ./docker-entrypoint-celery.sh deploy: replicas: 1 @@ -40,6 +43,8 @@ services: depends_on: - redis - db + restart: unless-stopped + redis: image: redis:alpine tty: true @@ -64,9 +69,13 @@ services: memory: ${REDIS_MEM:-512M} security_opt: - no-new-privileges:true + rabbit-mq: image: rabbitmq:3.10-management-alpine restart: unless-stopped + environment: + RABBITMQ_DEFAULT_USER: guest + RABBITMQ_DEFAULT_PASS: guest healthcheck: test: rabbitmq-diagnostics -q ping deploy: @@ -76,30 +85,40 @@ services: memory: ${RABBITMQ_MEM:-512M} security_opt: - no-new-privileges:true + db: - image: postgres:16.0-alpine + image: postgres:18.0-alpine restart: unless-stopped volumes: - postgres_data:/var/lib/postgresql/data/ environment: POSTGRES_DB: "${POSTGRES_DB:-jasmin}" POSTGRES_USER: "${POSTGRES_USER:-jasmin}" - POSTGRES_PASSWORD: "${POSTGRES_PASSWORD:-top_secret}" + POSTGRES_PASSWORD: "${POSTGRES_PASSWORD:-jasmin}" + healthcheck: + test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-jasmin}"] + interval: 10s + timeout: 5s + retries: 5 + jasmin: image: jookies/jasmin:latest restart: unless-stopped + volumes: + - ./jasmin_config/resource:/etc/jasmin/resource + - ./jasmin_config/store:/etc/jasmin/store + - ./jasmin_config:/etc/jasmin ports: - "${JASMIN_SMS_PORT:-2775}:2775" - "${JASMIN_DASHBOARD_PORT:-8990}:8990" - "${JASMIN_HTTP_API_PORT:-1401}:1401" depends_on: - redis: - condition: service_healthy - rabbit-mq: - condition: service_healthy + - redis + - rabbit-mq environment: REDIS_CLIENT_HOST: redis AMQP_BROKER_HOST: rabbit-mq + AMQP_BROKER_PORT: 5672 deploy: resources: limits: @@ -107,72 +126,32 @@ services: memory: ${JASMIN_MEM:-512M} security_opt: - no-new-privileges:true + sms_logger: image: jookies/jasmin:latest - command: bash -c "pip install psycopg2-binary mysql-connector-python && exec python /build/misc/scripts/sms_logger.py" volumes: - - /opt/jasmin-web-panel/config/docker/jasmin/jasmin/resource:/app/resource + - ./jasmin_config/resource:/etc/jasmin/resource + - ./jasmin_config:/etc/jasmin + - ./sms_logger.py:/build/misc/scripts/sms_logger.py:ro + command: bash -c "sleep 15 && pip install -U pip psycopg2-binary mysql-connector-python && exec python /build/misc/scripts/sms_logger.py" environment: 
DB_TYPE_MYSQL: ${DB_TYPE_MYSQL:-0} AMQP_BROKER_HOST: ${AMQP_BROKER_HOST:-rabbit-mq} AMQP_BROKER_PORT: ${AMQP_BROKER_PORT:-5672} - AMQP_SPEC_FILE: '/app/resource/amqp0-9-1.xml' + AMQP_SPEC_FILE: '/etc/jasmin/resource/amqp0-9-1.xml' DB_HOST: ${DB_HOST:-db} DB_DATABASE: ${DB_DATABASE:-jasmin} DB_TABLE: ${DB_TABLE:-submit_log} DB_USER: ${DB_USER:-jasmin} - DB_PASS: ${DB_PASS:-top_secret} + DB_PASS: ${DB_PASS:-jasmin} depends_on: - rabbit-mq - db + - jasmin restart: unless-stopped healthcheck: disable: true - prometheus: - image: prom/prometheus:latest - restart: unless-stopped - ports: - - "${PROMETHEUS_PORT:-9090}:9090" - volumes: - - ./prometheus.yml:/etc/prometheus/prometheus.yml - - monitoring_data:/prometheus - command: - - '--config.file=/etc/prometheus/prometheus.yml' - - '--storage.tsdb.path=/prometheus' - - '--web.console.libraries=/etc/prometheus/console_libraries' - - '--web.console.templates=/etc/prometheus/consoles' - - '--web.enable-lifecycle' - depends_on: - - jasmin - deploy: - resources: - limits: - cpus: "${PROMETHEUS_CPU:-2}" - memory: ${PROMETHEUS_MEM:-512M} - security_opt: - - no-new-privileges:true - grafana: - image: grafana/grafana - restart: unless-stopped - ports: - - "${GRAFANA_PORT:-2345}:3000" - environment: - GF_INSTALL_PLUGINS: "grafana-clock-panel,grafana-simple-json-datasource" - volumes: - - ./provisioning/datasources:/etc/grafana/provisioning/datasources:ro - - ./provisioning/dashboards:/etc/grafana/provisioning/dashboards:ro - - ./dashboards:/opt/grafana-dashboards:ro - - monitoring_data:/var/lib/grafana - depends_on: - - prometheus - deploy: - resources: - limits: - cpus: "${GRAFANA_CPU:-2}" - memory: ${GRAFANA_MEM:-512M} - security_opt: - - no-new-privileges:true volumes: web_public: driver: local diff --git a/main/web/templates/web/content/submit_logs.html b/main/web/templates/web/content/submit_logs.html index e365f60..4f37d1f 100644 --- a/main/web/templates/web/content/submit_logs.html +++ b/main/web/templates/web/content/submit_logs.html @@ -5,12 +5,30 @@ {% block content %}

{% trans "Submit Logs" %}

- +
+
+
+ +
+
+ +
+ + {% trans "Reset" %} + +
+
+                {% trans "Total" %}: {{ stats.total_count }}
+                {% trans "Success" %}: {{ stats.success_count }}
+                {% trans "Failed" %}: {{ stats.fail_count }}
+                {% trans "Unknown" %}: {{ stats.unknown_count }}
{% trans "Message ID" %}{% trans "Message ID" %} {% trans "Src. Address" %} {% trans "Dst. Address" %}{% trans "Short message" %} {% trans "Rate" %} {% trans "PDU Count" %} {% trans "UID" %}
{{ record.msgid }}{{ record.source_addr }}{{ record.destination_addr }}{{ record.decoded_source_addr }}{{ record.decoded_destination_addr }}{{ record.decoded_short_message }} {{ record.rate }} {{ record.pdu_count }} {{ record.uid }}
@@ -42,21 +60,23 @@

- + {% empty %} - + {% endfor %}
                        {{ record.trials }}
                        {{ record.created_at }}
                        {{ record.status_at }}
-                       {{ record.status }}
+                       {% if record.status == 'ESME_ROK' or record.status == 'ESME_RINVNUMDESTS' %}
+                           {{ record.status }}
+                       {% elif record.status == 'ESME_RDELIVERYFAILURE' %}
+                           {{ record.status }}
+                       {% else %}
+                           {{ record.status }}
+                       {% endif %}
+
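The three-way split above is applied consistently across the patch: ESME_ROK and ESME_RINVNUMDESTS are treated as success, ESME_RDELIVERYFAILURE as failure, and every other SMPP command status as unknown. A minimal standalone sketch of that classification follows; the helper name is illustrative, since the patch itself inlines these literals in the template, the stats aggregation and the later export task:

    # Illustrative sketch only; the patch does not define a shared helper.
    SUCCESS_STATUSES = {'ESME_ROK', 'ESME_RINVNUMDESTS'}
    FAIL_STATUSES = {'ESME_RDELIVERYFAILURE'}

    def classify_status(status):
        """Bucket a raw SMPP command status the way the UI badges do."""
        if status in SUCCESS_STATUSES:
            return 'success'
        if status in FAIL_STATUSES:
            return 'fail'
        return 'unknown'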

{% trans "No Submit Logs" %}

{% trans "No Submit Logs" %}
-
-                Total: {{ stats.total_count }}
-                Success: {{ stats.success_count }}
-                Failed: {{ stats.fail_count }}
-                Unknown: {{ stats.unknown_count }}
-
{% include "web/includes/paginate.html" with page_obj=submit_logs %} diff --git a/main/web/views/content/submit_logs.py b/main/web/views/content/submit_logs.py index b094020..f3ce5db 100644 --- a/main/web/views/content/submit_logs.py +++ b/main/web/views/content/submit_logs.py @@ -1,7 +1,7 @@ from django.contrib.auth.decorators import login_required from django.shortcuts import render from django.http import JsonResponse -from django.db.models import Count, Case, When, IntegerField +from django.db.models import Count, Case, When, IntegerField, Q from main.core.models import SubmitLog from main.core.utils import paginate @@ -10,18 +10,56 @@ @login_required def submit_logs_view(request): - stats = SubmitLog.objects.aggregate( + # Get search and filter parameters + search_query = request.GET.get('search', '').strip() + status_filter = request.GET.get('status_filter', '') + + # Start with all logs + submit_logs = SubmitLog.objects.all() + + # Apply search filter + if search_query: + submit_logs = submit_logs.filter( + Q(msgid__icontains=search_query) | + Q(source_addr__icontains=search_query) | + Q(destination_addr__icontains=search_query) | + Q(short_message__icontains=search_query) | + Q(uid__icontains=search_query) + ) + + # Apply status filter + if status_filter == 'success': + submit_logs = submit_logs.filter(status__in=['ESME_ROK', 'ESME_RINVNUMDESTS']) + elif status_filter == 'fail': + submit_logs = submit_logs.filter(status='ESME_RDELIVERYFAILURE') + elif status_filter == 'unknown': + submit_logs = submit_logs.exclude(status__in=['ESME_ROK', 'ESME_RINVNUMDESTS', 'ESME_RDELIVERYFAILURE']) + + # Calculate statistics (on all logs, not filtered) + all_logs = SubmitLog.objects.all() + stats = all_logs.aggregate( total_count=Count('id'), - success_count=Count(Case(When(status="success", then=1), output_field=IntegerField())), - fail_count=Count(Case(When(status="fail", then=1), output_field=IntegerField())), - unknown_count=Count(Case(When(status="unknown", then=1), output_field=IntegerField())), + success_count=Count(Case( + When(status__in=['ESME_ROK', 'ESME_RINVNUMDESTS'], then=1), + output_field=IntegerField() + )), + fail_count=Count(Case( + When(status='ESME_RDELIVERYFAILURE', then=1), + output_field=IntegerField() + )), ) - submit_logs = SubmitLog.objects.order_by("-created_at") - + # Calculate unknown count (all - success - fail) + stats['unknown_count'] = stats['total_count'] - stats['success_count'] - stats['fail_count'] + + # Order and paginate + submit_logs = submit_logs.order_by("-created_at") submit_logs = paginate(submit_logs, per_page=25, page=request.GET.get("page")) + return render(request, "web/content/submit_logs.html", context={ "submit_logs": submit_logs, "stats": stats, + "search_query": search_query, + "status_filter": status_filter, }) diff --git a/pyproject.toml b/pyproject.toml index b770f81..85f537b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ dependencies = [ "django-environ>=0.12.0,<0.13.0", "djangorestframework>=3.13.1,<4.0", "pexpect>=4.8.0,<5.0", - "pillow>=11.2.1,<12.0", + "pillow>=12.0.0,<13.0", "python-dateutil>=2.9.0.post0,<3.0", "requests>=2.32.3,<3.0", "ua-parser>=1.0.1,<2.0", From 278af59d020d616498598fdcf38d56ac1e77241d Mon Sep 17 00:00:00 2001 From: Tarek Kalaji Date: Fri, 31 Oct 2025 18:10:10 +0300 Subject: [PATCH 4/6] feat: add export functionality for submit logs with progress tracking - Added new Celery task for exporting submit logs to CSV/Excel with filtering options - Implemented Redis-based caching for export progress 
and file storage - Added database connection handling for Celery tasks to prevent leaks - Created frontend progress tracking with modal and real-time updates - Added support for both CSV and XLSX export formats with proper styling - Implemented configurable filters for date range, status, --- config/celery.py | 24 +- config/settings/com.py | 9 + main/core/tasks/__init__.py | 1 + main/core/tasks/export_submit_logs.py | 243 ++++++++++++++++++ main/web/static/web/content/submit_logs.js | 179 +++++++++++++ main/web/static/web/dashboard.js | 192 ++++++++++++++ .../templates/web/content/submit_logs.html | 179 +++++++++++-- main/web/templates/web/dashboard.html | 83 +++++- main/web/urls.py | 3 + main/web/views/content/__init__.py | 32 ++- main/web/views/content/smppccm.py | 1 + main/web/views/content/submit_logs.py | 107 +++++++- main/web/views/home.py | 96 ++++++- pyproject.toml | 1 + 14 files changed, 1121 insertions(+), 29 deletions(-) create mode 100644 main/core/tasks/export_submit_logs.py diff --git a/config/celery.py b/config/celery.py index 2f2a444..0d30e43 100644 --- a/config/celery.py +++ b/config/celery.py @@ -1,13 +1,17 @@ import os +import django + +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.pro") + +# Setup Django before importing any Django modules +django.setup() + from django.conf import settings from celery import Celery from celery.utils.log import get_task_logger - from django.utils import timezone logger = get_task_logger(__name__) - -os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.pro") CELERY_BROKER_URL = os.environ.get("CELERY_BROKER_URL", default="redis://localhost:6379/0") CELERY_RESULT_BACKEND = os.environ.get("CELERY_RESULT_BACKEND", default="redis://localhost:6379/0") @@ -21,6 +25,20 @@ app.conf.broker_connection_retry_on_startup = True app.autodiscover_tasks(lambda: settings.INSTALLED_APPS, related_name='tasks') +# Close database connections after each task to prevent leaks +from django.db import connection +from celery.signals import task_postrun, task_prerun + +@task_prerun.connect +def task_prerun_handler(sender=None, **kwargs): + """Close database connections before task starts.""" + connection.close() + +@task_postrun.connect +def task_postrun_handler(sender=None, **kwargs): + """Close database connections after task completes.""" + connection.close() + BROKER_CONNECTION_TIMEOUT = 120 CELERY_DEFAULT_UP_TIME = timezone.now() diff --git a/config/settings/com.py b/config/settings/com.py index 3c277ff..c7501bb 100644 --- a/config/settings/com.py +++ b/config/settings/com.py @@ -146,6 +146,15 @@ REDIS_DB = int(os.environ.get("REDIS_DB", default=0)) REDIS_URL = (REDIS_HOST, REDIS_PORT) +CACHES = { + 'default': { + 'BACKEND': 'django.core.cache.backends.redis.RedisCache', + 'LOCATION': f'redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}', + 'KEY_PREFIX': 'jasmin_cache', + 'TIMEOUT': 300, # 5 minutes default + } +} + DEFAULT_USER_AVATAR = STATIC_URL + "assets/img/user.png" DEFAULT_USER_FOLDER = "users" LAST_ACTIVITY_INTERVAL_SECS = 3600 diff --git a/main/core/tasks/__init__.py b/main/core/tasks/__init__.py index 42cdc87..9b6dff6 100644 --- a/main/core/tasks/__init__.py +++ b/main/core/tasks/__init__.py @@ -1 +1,2 @@ from .mail_html import mail_html_mails, mail_html_envelopes +from .export_submit_logs import export_submit_logs_task diff --git a/main/core/tasks/export_submit_logs.py b/main/core/tasks/export_submit_logs.py new file mode 100644 index 0000000..c40e6df --- /dev/null +++ b/main/core/tasks/export_submit_logs.py @@ -0,0 +1,243 
@@ +import csv +import io +from datetime import datetime +from celery import shared_task +from celery.utils.log import get_task_logger +from django.core.cache import cache +from openpyxl import Workbook +from openpyxl.styles import Font, PatternFill, Alignment +from openpyxl.utils import get_column_letter + +from main.core.models import SubmitLog + +logger = get_task_logger(__name__) + + +@shared_task(bind=True) +def export_submit_logs_task(self, filters, export_format='csv'): + """ + Export submit logs to CSV or Excel format. + + Args: + filters: Dictionary containing filter parameters + export_format: 'csv' or 'xlsx' + + Returns: + Dictionary with status and file path or error message + """ + task_id = None + try: + task_id = self.request.id + logger.info(f"=== TASK STARTED === ID: {task_id}") + logger.info(f"Export format: {export_format}") + logger.info(f"Filters: {filters}") + + # Test database connection + logger.info("Testing database connection...") + from django.db import connection + with connection.cursor() as cursor: + cursor.execute("SELECT 1") + logger.info("Database connection OK") + + # Update progress: Starting + logger.info("Setting initial cache progress...") + cache.set(f'export_progress_{task_id}', {'status': 'processing', 'progress': 0}, 300) + logger.info("Cache progress set successfully") + + # Build queryset based on filters + logger.info("Building queryset...") + queryset = SubmitLog.objects.all() + logger.info(f"Initial queryset created") + + # Apply date range filters + if filters.get('date_column') and filters.get('date_from'): + date_column = filters['date_column'] # 'created_at' or 'status_at' + date_from = datetime.fromisoformat(filters['date_from']) + queryset = queryset.filter(**{f'{date_column}__gte': date_from}) + + if filters.get('date_column') and filters.get('date_to'): + date_column = filters['date_column'] + date_to = datetime.fromisoformat(filters['date_to']) + queryset = queryset.filter(**{f'{date_column}__lte': date_to}) + + # Apply search filter + if filters.get('search'): + from django.db.models import Q + search_query = filters['search'] + queryset = queryset.filter( + Q(msgid__icontains=search_query) | + Q(source_addr__icontains=search_query) | + Q(destination_addr__icontains=search_query) | + Q(short_message__icontains=search_query) | + Q(uid__icontains=search_query) + ) + + # Apply status filter + if filters.get('status_filter') == 'success': + queryset = queryset.filter(status__in=['ESME_ROK', 'ESME_RINVNUMDESTS']) + elif filters.get('status_filter') == 'fail': + queryset = queryset.filter(status='ESME_RDELIVERYFAILURE') + elif filters.get('status_filter') == 'unknown': + queryset = queryset.exclude(status__in=['ESME_ROK', 'ESME_RINVNUMDESTS', 'ESME_RDELIVERYFAILURE']) + + # Order by created_at + logger.info("Ordering queryset by created_at...") + queryset = queryset.order_by('-created_at') + + logger.info("Counting total records...") + total_records = queryset.count() + logger.info(f"Total records to export: {total_records}") + + if total_records == 0: + cache.set(f'export_progress_{task_id}', { + 'status': 'completed', + 'progress': 100, + 'error': 'No records found with the given filters' + }, 300) + return {'status': 'error', 'message': 'No records found'} + + # Update progress: Fetching data + cache.set(f'export_progress_{task_id}', {'status': 'processing', 'progress': 10}, 300) + + # Define headers + headers = [ + 'Message ID', 'Source Connector', 'Routed CID', 'Source Address', + 'Destination Address', 'Rate', 'PDU Count', 'Short 
Message', + 'Status', 'UID', 'Trials', 'Created At', 'Status At' + ] + + if export_format == 'csv': + # Generate CSV + output = io.StringIO() + writer = csv.writer(output) + writer.writerow(headers) + + # Write data in batches + batch_size = 1000 + processed = 0 + + for record in queryset.iterator(chunk_size=batch_size): + writer.writerow([ + record.msgid, + record.source_connector, + record.routed_cid, + record.decoded_source_addr, + record.decoded_destination_addr, + str(record.rate), + record.pdu_count, + record.decoded_short_message, + record.status, + record.uid, + record.trials, + record.created_at.strftime('%Y-%m-%d %H:%M:%S') if record.created_at else '', + record.status_at.strftime('%Y-%m-%d %H:%M:%S') if record.status_at else '', + ]) + + processed += 1 + if processed % 100 == 0: + progress = 10 + int((processed / total_records) * 80) + cache.set(f'export_progress_{task_id}', { + 'status': 'processing', + 'progress': progress, + 'processed': processed, + 'total': total_records + }, 300) + + content = output.getvalue() + content_type = 'text/csv' + filename = f'submit_logs_{datetime.now().strftime("%Y%m%d_%H%M%S")}.csv' + + else: # xlsx + # Generate Excel + wb = Workbook() + ws = wb.active + ws.title = "Submit Logs" + + # Style for header + header_fill = PatternFill(start_color="366092", end_color="366092", fill_type="solid") + header_font = Font(bold=True, color="FFFFFF") + header_alignment = Alignment(horizontal="center", vertical="center") + + # Write headers + for col_num, header in enumerate(headers, 1): + cell = ws.cell(row=1, column=col_num) + cell.value = header + cell.fill = header_fill + cell.font = header_font + cell.alignment = header_alignment + + # Write data in batches + batch_size = 1000 + processed = 0 + row_num = 2 + + for record in queryset.iterator(chunk_size=batch_size): + ws.cell(row=row_num, column=1, value=record.msgid) + ws.cell(row=row_num, column=2, value=record.source_connector) + ws.cell(row=row_num, column=3, value=record.routed_cid) + ws.cell(row=row_num, column=4, value=record.decoded_source_addr) + ws.cell(row=row_num, column=5, value=record.decoded_destination_addr) + ws.cell(row=row_num, column=6, value=float(record.rate) if record.rate else 0.0) + ws.cell(row=row_num, column=7, value=record.pdu_count) + ws.cell(row=row_num, column=8, value=record.decoded_short_message) + ws.cell(row=row_num, column=9, value=record.status) + ws.cell(row=row_num, column=10, value=record.uid) + ws.cell(row=row_num, column=11, value=record.trials) + ws.cell(row=row_num, column=12, value=record.created_at.strftime('%Y-%m-%d %H:%M:%S') if record.created_at else '') + ws.cell(row=row_num, column=13, value=record.status_at.strftime('%Y-%m-%d %H:%M:%S') if record.status_at else '') + + row_num += 1 + processed += 1 + + if processed % 100 == 0: + progress = 10 + int((processed / total_records) * 80) + cache.set(f'export_progress_{task_id}', { + 'status': 'processing', + 'progress': progress, + 'processed': processed, + 'total': total_records + }, 300) + + # Auto-size columns + for col in range(1, len(headers) + 1): + ws.column_dimensions[get_column_letter(col)].width = 15 + + # Save to BytesIO + output = io.BytesIO() + wb.save(output) + content = output.getvalue() + content_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' + filename = f'submit_logs_{datetime.now().strftime("%Y%m%d_%H%M%S")}.xlsx' + + # Store the file content in cache temporarily (for 5 minutes) + cache.set(f'export_file_{task_id}', { + 'content': content, + 'content_type': 
content_type, + 'filename': filename + }, 300) + + # Update progress: Completed + cache.set(f'export_progress_{task_id}', { + 'status': 'completed', + 'progress': 100, + 'total': total_records, + 'filename': filename + }, 300) + + logger.info(f"Export completed: {filename} ({total_records} records)") + + return { + 'status': 'success', + 'filename': filename, + 'total_records': total_records + } + + except Exception as e: + logger.error(f"Export failed with error: {str(e)}", exc_info=True) + if task_id: + cache.set(f'export_progress_{task_id}', { + 'status': 'failed', + 'progress': 0, + 'error': str(e) + }, 300) + return {'status': 'error', 'message': str(e)} diff --git a/main/web/static/web/content/submit_logs.js b/main/web/static/web/content/submit_logs.js index 183219f..1dda323 100644 --- a/main/web/static/web/content/submit_logs.js +++ b/main/web/static/web/content/submit_logs.js @@ -1,4 +1,183 @@ (function($){ var local_path = window.location.pathname, csrfmiddlewaretoken = document.getElementsByName('csrfmiddlewaretoken')[0].value; $("li.nav-item.submit_logs-menu").addClass("active"); + + var currentTaskId = null; + var progressCheckInterval = null; + + // Function to get current filter values + function getFilterValues() { + return { + search: $('#search').val(), + status_filter: $('#status_filter').val(), + date_column: $('#date_column').val(), + date_from: $('#date_from').val(), + date_to: $('#date_to').val() + }; + } + + // Function to start export + function startExport(format) { + var filters = getFilterValues(); + + // Show modal + $('#exportProgressModal').modal('show'); + resetProgressModal(); + + // Make AJAX request to start export + $.ajax({ + url: export_url, + type: 'POST', + data: { + format: format, + search: filters.search, + status_filter: filters.status_filter, + date_column: filters.date_column, + date_from: filters.date_from, + date_to: filters.date_to, + csrfmiddlewaretoken: csrfmiddlewaretoken + }, + success: function(response) { + if (response.status === 'started') { + currentTaskId = response.task_id; + updateProgressMessage(main_trans.export_started, 'primary'); + startProgressCheck(); + } else { + updateProgressMessage(main_trans.export_error, 'danger'); + stopProgressCheck(); + } + }, + error: function() { + updateProgressMessage(main_trans.export_error, 'danger'); + stopProgressCheck(); + } + }); + } + + // Function to check progress + function checkProgress() { + if (!currentTaskId) return; + + var progressUrl = progress_url_template.replace('TASK_ID', currentTaskId); + + $.ajax({ + url: progressUrl, + type: 'GET', + success: function(data) { + if (data.status === 'processing') { + updateProgressBar(data.progress || 0); + if (data.processed && data.total) { + $('#export-details').text( + main_trans.records_processed + ': ' + data.processed + ' / ' + data.total + ); + } + } else if (data.status === 'completed') { + updateProgressBar(100); + updateProgressMessage(main_trans.export_completed, 'success'); + showDownloadButton(); + stopProgressCheck(); + if (data.total) { + $('#export-details').text( + main_trans.records_processed + ': ' + data.total + ); + } + } else if (data.status === 'failed') { + updateProgressMessage(main_trans.export_failed + ' ' + (data.error || ''), 'danger'); + stopProgressCheck(); + } else if (data.status === 'not_found') { + updateProgressMessage(main_trans.export_error, 'warning'); + stopProgressCheck(); + } + }, + error: function() { + updateProgressMessage(main_trans.export_error, 'danger'); + stopProgressCheck(); + } + }); + } 
+ + // Function to start progress checking + function startProgressCheck() { + if (progressCheckInterval) { + clearInterval(progressCheckInterval); + } + progressCheckInterval = setInterval(checkProgress, 1000); // Check every second + } + + // Function to stop progress checking + function stopProgressCheck() { + if (progressCheckInterval) { + clearInterval(progressCheckInterval); + progressCheckInterval = null; + } + } + + // Function to update progress bar + function updateProgressBar(progress) { + var $progressBar = $('#export-progress-bar'); + $progressBar.css('width', progress + '%'); + $progressBar.attr('aria-valuenow', progress); + $progressBar.text(Math.round(progress) + '%'); + } + + // Function to update progress message + function updateProgressMessage(message, type) { + var iconClass = 'fas fa-spinner fa-spin'; + if (type === 'success') iconClass = 'fas fa-check-circle'; + else if (type === 'danger') iconClass = 'fas fa-times-circle'; + else if (type === 'warning') iconClass = 'fas fa-exclamation-triangle'; + + $('#export-status-message').html( + '' + + '

' + message + '

' + ); + } + + // Function to show download button + function showDownloadButton() { + $('#download-export-btn').show(); + $('#close-export-modal-btn').text('Close'); + } + + // Function to reset progress modal + function resetProgressModal() { + updateProgressBar(0); + updateProgressMessage(main_trans.export_processing, 'primary'); + $('#download-export-btn').hide(); + $('#close-export-modal-btn').text('Close'); + $('#export-details').text(''); + } + + // Function to download file + function downloadFile() { + if (!currentTaskId) return; + + var downloadUrl = download_url_template.replace('TASK_ID', currentTaskId); + window.location.href = downloadUrl; + + // Close modal after a short delay + setTimeout(function() { + $('#exportProgressModal').modal('hide'); + }, 1000); + } + + // Event handlers + $('#export-csv').on('click', function() { + startExport('csv'); + }); + + $('#export-xlsx').on('click', function() { + startExport('xlsx'); + }); + + $('#download-export-btn').on('click', function() { + downloadFile(); + }); + + // Clean up on modal close + $('#exportProgressModal').on('hidden.bs.modal', function() { + stopProgressCheck(); + currentTaskId = null; + }); + })(jQuery); \ No newline at end of file diff --git a/main/web/static/web/dashboard.js b/main/web/static/web/dashboard.js index 6b3e44b..97473e7 100644 --- a/main/web/static/web/dashboard.js +++ b/main/web/static/web/dashboard.js @@ -1,5 +1,7 @@ (function($){ //var csrfmiddlewaretoken = document.getElementsByName('csrfmiddlewaretoken')[0].value; + + // Gateway state check var gw_state = function() { $.ajax({ type: "GET", @@ -21,4 +23,194 @@ }); } gw_state(); + + // Chart.js Configuration + var timelineChart = null; + var donutChart = null; + var currentGrouping = 'daily'; + + // Initialize Timeline Chart + function initTimelineChart(labels, successData, failedData) { + var ctx = document.getElementById('timelineChart'); + if (!ctx) return; + + // Destroy existing chart if exists + if (timelineChart) { + timelineChart.destroy(); + } + + timelineChart = new Chart(ctx, { + type: 'line', + data: { + labels: labels, + datasets: [ + { + label: 'Success', + data: successData, + borderColor: 'rgb(28, 200, 138)', + backgroundColor: 'rgba(28, 200, 138, 0.1)', + borderWidth: 2, + tension: 0.4, + fill: true, + pointRadius: 3, + pointHoverRadius: 5 + }, + { + label: 'Failed', + data: failedData, + borderColor: 'rgb(231, 74, 59)', + backgroundColor: 'rgba(231, 74, 59, 0.1)', + borderWidth: 2, + tension: 0.4, + fill: true, + pointRadius: 3, + pointHoverRadius: 5 + } + ] + }, + options: { + responsive: true, + maintainAspectRatio: false, + interaction: { + mode: 'index', + intersect: false + }, + plugins: { + legend: { + display: true, + position: 'top' + }, + tooltip: { + callbacks: { + label: function(context) { + return context.dataset.label + ': ' + context.parsed.y.toLocaleString(); + } + } + } + }, + scales: { + y: { + beginAtZero: true, + ticks: { + callback: function(value) { + return value.toLocaleString(); + } + } + }, + x: { + grid: { + display: false + } + } + } + } + }); + } + + // Initialize Donut Chart + function initDonutChart(successCount, failedCount, unknownCount) { + var ctx = document.getElementById('donutChart'); + if (!ctx) return; + + // Destroy existing chart if exists + if (donutChart) { + donutChart.destroy(); + } + + donutChart = new Chart(ctx, { + type: 'doughnut', + data: { + labels: ['Success', 'Failed', 'Unknown'], + datasets: [{ + data: [successCount, failedCount, unknownCount], + backgroundColor: [ + 'rgb(28, 
200, 138)', + 'rgb(231, 74, 59)', + 'rgb(133, 135, 150)' + ], + borderWidth: 0, + hoverOffset: 4 + }] + }, + options: { + responsive: true, + maintainAspectRatio: false, + plugins: { + legend: { + display: true, + position: 'bottom' + }, + tooltip: { + callbacks: { + label: function(context) { + var label = context.label || ''; + var value = context.parsed || 0; + var total = context.dataset.data.reduce((a, b) => a + b, 0); + var percentage = total > 0 ? ((value / total) * 100).toFixed(1) : 0; + return label + ': ' + value.toLocaleString() + ' (' + percentage + '%)'; + } + } + } + } + } + }); + } + + // Load timeline data based on grouping + function loadTimelineData(grouping) { + $.ajax({ + type: "GET", + url: window.location.pathname + 'manage/', + data: { + s: 'submit_log_timeline', + grouping: grouping + }, + beforeSend: function() { + // Show loading state + $('.grouping-buttons button').prop('disabled', true); + }, + success: function(data) { + if (data.status === 'success') { + initTimelineChart(data.labels, data.success, data.failed); + currentGrouping = grouping; + } + }, + error: function(jqXHR, textStatus, errorThrown) { + console.error('Failed to load timeline data:', errorThrown); + }, + complete: function() { + $('.grouping-buttons button').prop('disabled', false); + } + }); + } + + // Initialize charts with data from Django + if (typeof chartData !== 'undefined') { + // Initialize timeline chart with initial data + initTimelineChart( + chartData.timeline.labels, + chartData.timeline.success, + chartData.timeline.failed + ); + + // Initialize donut chart + initDonutChart( + chartData.donut.success, + chartData.donut.failed, + chartData.donut.unknown + ); + } + + // Grouping button click handlers + $('.grouping-buttons button').on('click', function() { + var grouping = $(this).data('grouping'); + + // Update button states + $('.grouping-buttons button').removeClass('active'); + $(this).addClass('active'); + + // Load new data + loadTimelineData(grouping); + }); + })(jQuery); \ No newline at end of file diff --git a/main/web/templates/web/content/submit_logs.html b/main/web/templates/web/content/submit_logs.html index 4f37d1f..7f79768 100644 --- a/main/web/templates/web/content/submit_logs.html +++ b/main/web/templates/web/content/submit_logs.html @@ -1,33 +1,129 @@ {% extends "web/base.html" %} {% load static i18n %} {% block title %}{% trans "Submit Logs" %}{% endblock title %} -{% block extracss %}{% endblock extracss %} +{% block extracss %} + +{% endblock extracss %} {% block content %}

{% trans "Submit Logs" %}

-
-
- -
-
- -
- - {% trans "Reset" %} -
+
+
+
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ +
+ + {% trans "Reset" %} +
+
+
+
+
+ + +
+ {% trans "Export will include all filtered records" %} +
+
+
-
- {% trans "Total" %}: {{ stats.total_count }} - {% trans "Success" %}: {{ stats.success_count }} - {% trans "Failed" %}: {{ stats.fail_count }} - {% trans "Unknown" %}: {{ stats.unknown_count }} +
+ + {% trans "Total" %}: {{ stats.total_count|default:0 }} + + + {% trans "Success" %}: {{ stats.success_count|default:0 }} + + + {% trans "Failed" %}: {{ stats.fail_count|default:0 }} + + + {% trans "Unknown" %}: {{ stats.unknown_count|default:0 }} +
@@ -81,11 +177,50 @@

{% trans "Submit Logs" %}

+ + {% csrf_token %} {% endblock content %} {% block extrajs %} {% endblock extrajs %} \ No newline at end of file diff --git a/main/web/templates/web/dashboard.html b/main/web/templates/web/dashboard.html index 89f4841..ab96ab3 100644 --- a/main/web/templates/web/dashboard.html +++ b/main/web/templates/web/dashboard.html @@ -1,7 +1,26 @@ {% extends "web/base.html" %} {% load static i18n humanize %} {% block title %}{% trans "Dashboard" %}{% endblock title %} -{% block extracss %}{% endblock extracss %} +{% block extracss %} + +{% endblock extracss %} {% block content %}
@@ -31,7 +50,69 @@
+ + +
+
+
+
+
{% trans "Submit Log Timeline" %}
+
+ + + + +
+
+
+
+ +
+
+
+
+
+
+
+
{% trans "Message Status" %}
+
+
+
+ +
+
+ + {% trans "Success" %}: {{ submit_stats.success|default:0 }} + + + {% trans "Failed" %}: {{ submit_stats.failed|default:0 }} + + + {% trans "Unknown" %}: {{ submit_stats.unknown|default:0 }} + +
+
+
+
+
{% endblock content %} {% block extrajs %} + + + {% endblock extrajs %} \ No newline at end of file diff --git a/main/web/urls.py index a2b832a..5659398 100644 --- a/main/web/urls.py +++ b/main/web/urls.py @@ -19,6 +19,9 @@ path('smppccm/', views.smppccm_view, name='smppccm_view'), path('send_message/manage/', views.send_message_view_manage, name='send_message_view_manage'), path('send_message/', views.send_message_view, name='send_message_view'), + path('submit_logs/export/', views.submit_logs_export, name='submit_logs_export'), + path('submit_logs/export/progress/<str:task_id>/', views.submit_logs_export_progress, name='submit_logs_export_progress'), + path('submit_logs/export/download/<str:task_id>/', views.submit_logs_export_download, name='submit_logs_export_download'), path('submit_logs/manage/', views.submit_logs_view_manage, name='submit_logs_view_manage'), path('submit_logs/', views.submit_logs_view, name='submit_logs_view'), path('users/manage/', views.users_view_manage, name='users_view_manage'), diff --git a/main/web/views/content/__init__.py index 0ca1a83..84c80fc 100644 --- a/main/web/views/content/__init__.py +++ b/main/web/views/content/__init__.py @@ -5,5 +5,35 @@ from .mtrouter import mtrouter_view, mtrouter_view_manage from .send_message import send_message_view, send_message_view_manage from .smppccm import smppccm_view, smppccm_view_manage -from .submit_logs import submit_logs_view, submit_logs_view_manage +from .submit_logs import ( + submit_logs_view, + submit_logs_view_manage, + submit_logs_export, + submit_logs_export_progress, + submit_logs_export_download, +) from .users import users_view, users_view_manage + +__all__ = [ + "filters_view", + "filters_view_manage", + "groups_view", + "groups_view_manage", + "httpccm_view", + "httpccm_view_manage", + "morouter_view", + "morouter_view_manage", + "mtrouter_view", + "mtrouter_view_manage", + "send_message_view", + "send_message_view_manage", + "smppccm_view", + "smppccm_view_manage", + "submit_logs_view", + "submit_logs_view_manage", + "submit_logs_export", + "submit_logs_export_progress", + "submit_logs_export_download", + "users_view", + "users_view_manage", +] \ No newline at end of file diff --git a/main/web/views/content/smppccm.py index 190a06a..133e69a 100644 --- a/main/web/views/content/smppccm.py +++ b/main/web/views/content/smppccm.py @@ -81,6 +81,7 @@ def smppccm_view_manage(request): response["message"] = str(_("SMPPCCM stoped successfully!")) elif s == "restart": smppccm.stop(cid=request.POST.get("cid")) + time.sleep(1) response = smppccm.start(cid=request.POST.get("cid")) response["message"] = str(_("SMPPCCM restarted successfully!")) else: diff --git a/main/web/views/content/submit_logs.py index f3ce5db..98607aa 100644 --- a/main/web/views/content/submit_logs.py +++ b/main/web/views/content/submit_logs.py @@ -1,11 +1,15 @@ from django.contrib.auth.decorators import login_required from django.shortcuts import render -from django.http import JsonResponse +from django.http import JsonResponse, HttpResponse from django.db.models import Count, Case, When, IntegerField, Q +from django.core.cache import cache +from django.views.decorators.http import require_http_methods +from datetime import datetime from main.core.models import SubmitLog from main.core.utils import paginate from main.core.tools import require_post_ajax +from main.core.tasks.export_submit_logs import export_submit_logs_task
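The Celery task imported here lives in `main/core/tasks/export_submit_logs.py`, which is not included in this patch. Below is a minimal sketch of the contract the views that follow assume: the task publishes progress under `export_progress_<task_id>` and the finished file under `export_file_<task_id>`. The chunk size, TTL, and CSV-only branch are assumptions; the real task also re-applies `filters` and has an xlsx branch (openpyxl is added to pyproject.toml for that purpose).

```python
# Hypothetical sketch of main/core/tasks/export_submit_logs.py (not in this patch).
# It only illustrates the cache contract consumed by submit_logs_export_progress
# and submit_logs_export_download below.
import csv
import io

from celery import shared_task
from django.core.cache import cache

from main.core.models import SubmitLog

CACHE_TTL = 3600  # assumed: progress/file entries expire after one hour


@shared_task(bind=True)
def export_submit_logs_task(self, filters, export_format="csv"):
    task_id = self.request.id
    qs = SubmitLog.objects.all()  # the real task would re-apply `filters` here

    buffer = io.StringIO()
    writer = csv.writer(buffer)
    writer.writerow(["created_at", "source_addr", "destination_addr", "status"])

    total = qs.count() or 1
    for i, log in enumerate(qs.iterator(), start=1):
        writer.writerow([log.created_at, log.decoded_source_addr,
                         log.decoded_destination_addr, log.status])
        if i % 1000 == 0:
            # Progress payload polled by submit_logs_export_progress
            cache.set(f"export_progress_{task_id}",
                      {"status": "processing", "progress": int(i * 100 / total)},
                      CACHE_TTL)

    # File payload read (and then deleted) by submit_logs_export_download
    cache.set(f"export_file_{task_id}", {
        "content": buffer.getvalue().encode("utf-8"),
        "content_type": "text/csv",
        "filename": "submit_logs.csv",
    }, CACHE_TTL)
    cache.set(f"export_progress_{task_id}",
              {"status": "completed", "progress": 100}, CACHE_TTL)
```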
@login_required @@ -13,10 +17,33 @@ def submit_logs_view(request): # Get search and filter parameters search_query = request.GET.get('search', '').strip() status_filter = request.GET.get('status_filter', '') + date_column = request.GET.get('date_column', 'created_at') # 'created_at' or 'status_at' + if date_column not in ('created_at', 'status_at'): + # Guard against arbitrary field lookups via user-supplied column names + date_column = 'created_at' + date_from = request.GET.get('date_from', '').strip() + date_to = request.GET.get('date_to', '').strip() # Start with all logs submit_logs = SubmitLog.objects.all() + # Apply date range filter + if date_column and date_from: + try: + date_from_obj = datetime.strptime(date_from, '%Y-%m-%d') + submit_logs = submit_logs.filter(**{f'{date_column}__gte': date_from_obj}) + except ValueError: + pass # Invalid date format, skip filter + + if date_column and date_to: + try: + date_to_obj = datetime.strptime(date_to, '%Y-%m-%d') + # Add 23:59:59 to include the entire day + date_to_obj = date_to_obj.replace(hour=23, minute=59, second=59) + submit_logs = submit_logs.filter(**{f'{date_column}__lte': date_to_obj}) + except ValueError: + pass # Invalid date format, skip filter + # Apply search filter if search_query: submit_logs = submit_logs.filter( @@ -60,6 +87,9 @@ def submit_logs_view(request): "stats": stats, "search_query": search_query, "status_filter": status_filter, + "date_column": date_column, + "date_from": date_from, + "date_to": date_to, }) @@ -67,3 +97,81 @@ def submit_logs_view_manage(request): response = {} return JsonResponse(response) + + +@login_required +@require_http_methods(["POST"]) +def submit_logs_export(request): + """Initiate async export of submit logs.""" + export_format = request.POST.get('format', 'csv') # 'csv' or 'xlsx' + + # Get current filters from request + filters = { + 'search': request.POST.get('search', '').strip(), + 'status_filter': request.POST.get('status_filter', ''), + 'date_column': request.POST.get('date_column', 'created_at'), + 'date_from': request.POST.get('date_from', '').strip(), + 'date_to': request.POST.get('date_to', '').strip(), + } + + # Convert dates to ISO format for task + if filters['date_from']: + try: + date_obj = datetime.strptime(filters['date_from'], '%Y-%m-%d') + filters['date_from'] = date_obj.isoformat() + except ValueError: + filters['date_from'] = '' + + if filters['date_to']: + try: + date_obj = datetime.strptime(filters['date_to'], '%Y-%m-%d') + date_obj = date_obj.replace(hour=23, minute=59, second=59) + filters['date_to'] = date_obj.isoformat() + except ValueError: + filters['date_to'] = '' + + # Start async task + task = export_submit_logs_task.delay(filters, export_format) + + return JsonResponse({ + 'status': 'started', + 'task_id': task.id, + 'message': 'Export started. Please wait...' 
+ }) + + +@login_required +@require_http_methods(["GET"]) +def submit_logs_export_progress(request, task_id): + """Check the progress of an export task.""" + progress_data = cache.get(f'export_progress_{task_id}') + + if not progress_data: + return JsonResponse({ + 'status': 'not_found', + 'message': 'Task not found or expired' + }) + + return JsonResponse(progress_data) + + +@login_required +@require_http_methods(["GET"]) +def submit_logs_export_download(request, task_id): + """Download the exported file.""" + file_data = cache.get(f'export_file_{task_id}') + + if not file_data: + return HttpResponse('File not found or expired', status=404) + + response = HttpResponse( + file_data['content'], + content_type=file_data['content_type'] + ) + response['Content-Disposition'] = f'attachment; filename="{file_data["filename"]}"' + + # Clean up cache after download + cache.delete(f'export_file_{task_id}') + cache.delete(f'export_progress_{task_id}') + + return response diff --git a/main/web/views/home.py b/main/web/views/home.py index 78ffa0a..e54c1a0 100644 --- a/main/web/views/home.py +++ b/main/web/views/home.py @@ -2,15 +2,61 @@ from django.http import JsonResponse from django.contrib.auth.decorators import login_required from django.conf import settings +from django.db.models import Count, Q +from django.db.models.functions import TruncDate, TruncWeek, TruncMonth, TruncYear +from datetime import datetime, timedelta +import json from main.core.utils import get_client_ip, is_online from main.core.tools import require_get_ajax +from main.core.models import SubmitLog @login_required def dashboard_view(request): ip_address = get_client_ip(request) - return render(request, "web/dashboard.html", dict(ip_address=ip_address)) + + # Get submit log statistics + submit_stats = SubmitLog.objects.aggregate( + total=Count('id'), + success=Count('id', filter=Q(status__in=['ESME_ROK', 'ESME_RINVNUMDESTS'])), + failed=Count('id', filter=Q(status='ESME_RDELIVERYFAILURE')), + unknown=Count('id', filter=~Q(status__in=['ESME_ROK', 'ESME_RINVNUMDESTS', 'ESME_RDELIVERYFAILURE'])) + ) + + # Get last 30 days data for initial timeline chart (daily) + end_date = datetime.now() + start_date = end_date - timedelta(days=30) + + daily_data = SubmitLog.objects.filter( + created_at__gte=start_date, + created_at__lte=end_date + ).annotate( + date=TruncDate('created_at') + ).values('date').annotate( + success=Count('id', filter=Q(status__in=['ESME_ROK', 'ESME_RINVNUMDESTS'])), + failed=Count('id', filter=Q(status='ESME_RDELIVERYFAILURE')) + ).order_by('date') + + # Format data for Chart.js + timeline_labels = [] + timeline_success = [] + timeline_failed = [] + + for entry in daily_data: + timeline_labels.append(entry['date'].strftime('%Y-%m-%d')) + timeline_success.append(entry['success']) + timeline_failed.append(entry['failed']) + + context = { + 'ip_address': ip_address, + 'submit_stats': submit_stats, + 'timeline_labels': json.dumps(timeline_labels), + 'timeline_success': json.dumps(timeline_success), + 'timeline_failed': json.dumps(timeline_failed), + } + + return render(request, "web/dashboard.html", context) @require_get_ajax @@ -21,4 +67,52 @@ def global_manage(request): # CHECK GATEWAY BINDING OK response["status"], response["message"] = is_online( host=settings.TELNET_HOST, port=settings.TELNET_PORT) + elif s == "submit_log_timeline": + # Get timeline data based on grouping + grouping = request.GET.get('grouping', 'daily') + + # Determine date range based on grouping + end_date = datetime.now() + if grouping == 
'daily': + start_date = end_date - timedelta(days=30) + trunc_func = TruncDate + date_format = '%Y-%m-%d' + elif grouping == 'weekly': + start_date = end_date - timedelta(weeks=12) + trunc_func = TruncWeek + date_format = '%Y-W%W' + elif grouping == 'monthly': + start_date = end_date - timedelta(days=365) + trunc_func = TruncMonth + date_format = '%Y-%m' + else: # yearly + start_date = end_date - timedelta(days=365*3) + trunc_func = TruncYear + date_format = '%Y' + + data = SubmitLog.objects.filter( + created_at__gte=start_date, + created_at__lte=end_date + ).annotate( + period=trunc_func('created_at') + ).values('period').annotate( + success=Count('id', filter=Q(status__in=['ESME_ROK', 'ESME_RINVNUMDESTS'])), + failed=Count('id', filter=Q(status='ESME_RDELIVERYFAILURE')) + ).order_by('period') + + labels = [] + success_data = [] + failed_data = [] + + for entry in data: + if entry['period']: + labels.append(entry['period'].strftime(date_format)) + success_data.append(entry['success']) + failed_data.append(entry['failed']) + + response['labels'] = labels + response['success'] = success_data + response['failed'] = failed_data + response['status'] = 'success' + return JsonResponse(response, status=200) diff --git a/pyproject.toml b/pyproject.toml index 85f537b..c4dc014 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,6 +24,7 @@ dependencies = [ "tablib>=3.0.0,<4.0", "xlrd>=2.0.1,<3.0", "xlwt>=1.3.0,<2.0", + "openpyxl>=3.1.0,<4.0", "zipp>=3.8.0,<4.0", "smpplib>=2.2.4,<3.0", "psycopg[binary]>=3.1.8,<4.0", From 2cd57181be5503c2d58abbce69e9bd1c1fe1c414 Mon Sep 17 00:00:00 2001 From: 101t Date: Sat, 1 Nov 2025 11:02:47 +0300 Subject: [PATCH 5/6] fixing middleware activity logs, smppccm fix, celery fixes --- .travis.yml | 2 +- README.md | 13 ++++--- config/celery.py | 4 +-- config/settings/com.py | 8 +++-- config/version.py | 2 +- main/core/middleware.py | 60 ++++++++++++++++++++++++------- main/web/views/content/smppccm.py | 4 ++- pyproject.toml | 2 +- 8 files changed, 69 insertions(+), 26 deletions(-) diff --git a/.travis.yml b/.travis.yml index dec4162..8a4f9db 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,7 +8,7 @@ before_install: - cp -rf sample.env .env before_script: # install requirements: - - pip install -U pip wheel uv && uv pip install -r pyproject.toml + - pip install -U pip wheel uv && uv pip install -r pyproject.toml --extra prod services: - redis-server addons: diff --git a/README.md b/README.md index ae98b63..b42bdbc 100644 --- a/README.md +++ b/README.md @@ -159,9 +159,11 @@ TELNET_USERNAME=jcliadmin TELNET_PW=jclipwd TELNET_TIMEOUT=10 -# Redis & Celery -REDIS_URL=redis://localhost:6379/0 -CELERY_BROKER_URL=amqp://guest:guest@localhost:5672// +# Redis for Cache & Celery +REDIS_HOST=redis +REDIS_PORT=6379 +REDIS_DB=0 +REDIS_PASSWORD= # Submit Log Feature SUBMIT_LOG=True @@ -284,8 +286,9 @@ For ARM-based systems: | `SECRET_KEY` | Django secret key | - | ✅ | | `ALLOWED_HOSTS` | Allowed hosts | `*` | ✅ | | `PRODB_URL` | PostgreSQL URL | - | ✅ | -| `REDIS_URL` | Redis URL | `redis://redis:6379/0` | ✅ | -| `CELERY_BROKER_URL` | RabbitMQ URL | `amqp://guest:guest@rabbit-mq:5672//` | ✅ | +| `REDIS_HOST` | Redis host | `redis` | ✅ | +| `REDIS_PORT` | Redis port | `6379` | ✅ | +| `REDIS_DB` | Redis database | `0` | ✅ | | `TELNET_HOST` | Jasmin telnet host | `127.0.0.1` | ✅ | | `TELNET_PORT` | Jasmin telnet port | `8990` | ✅ | | `TELNET_USERNAME` | Jasmin admin username | `jcliadmin` | ✅ | diff --git a/config/celery.py b/config/celery.py index 0d30e43..611fbc8 100644 --- 
a/config/celery.py +++ b/config/celery.py @@ -12,8 +12,8 @@ from django.utils import timezone logger = get_task_logger(__name__) -CELERY_BROKER_URL = os.environ.get("CELERY_BROKER_URL", default="redis://localhost:6379/0") -CELERY_RESULT_BACKEND = os.environ.get("CELERY_RESULT_BACKEND", default="redis://localhost:6379/0") +CELERY_BROKER_URL = settings.REDIS_URL +CELERY_RESULT_BACKEND = settings.REDIS_URL app = Celery('config') diff --git a/config/settings/com.py b/config/settings/com.py index c7501bb..029efa0 100644 --- a/config/settings/com.py +++ b/config/settings/com.py @@ -144,12 +144,16 @@ REDIS_HOST = os.environ.get("REDIS_HOST", default="redis") REDIS_PORT = int(os.environ.get("REDIS_PORT", default=6379)) REDIS_DB = int(os.environ.get("REDIS_DB", default=0)) -REDIS_URL = (REDIS_HOST, REDIS_PORT) +REDIS_PASSWORD = os.environ.get("REDIS_PASSWORD", default="") +if REDIS_PASSWORD: + REDIS_URL = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}' +else: + REDIS_URL = f'redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}' CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.redis.RedisCache', - 'LOCATION': f'redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}', + 'LOCATION': REDIS_URL, 'KEY_PREFIX': 'jasmin_cache', 'TIMEOUT': 300, # 5 minutes default } diff --git a/config/version.py b/config/version.py index ce31092..6e47318 100644 --- a/config/version.py +++ b/config/version.py @@ -1 +1 @@ -VERSION = "3.0.1" +VERSION = "3.0.3" diff --git a/main/core/middleware.py b/main/core/middleware.py index 4b4c14a..98835ec 100644 --- a/main/core/middleware.py +++ b/main/core/middleware.py @@ -1,7 +1,7 @@ from django.conf import settings from django.utils.deprecation import MiddlewareMixin from django.core.cache import cache -from django.db import transaction +from django.db import transaction, connection from .utils import get_user_agent, get_client_ip, LazyEncoder from .models import ActivityLog @@ -73,20 +73,54 @@ def process_request(self, request): self._enqueue_activity_log_creation(request, user_agent) def _enqueue_activity_log_creation(self, request, user_agent): - from concurrent.futures import ThreadPoolExecutor - with ThreadPoolExecutor(max_workers=1) as executor: - executor.submit(self._create_activity_log, request, user_agent) + # Extract data from request before threading to avoid context issues + log_data = { + 'user_id': request.user.id, + 'service': request.POST.get("s", "unknown"), + 'method': request.method, + 'params': self.clean_params(request.POST or request.GET or {}), + 'path': request.path, + 'ip': get_client_ip(request), + 'user_agent': user_agent, + } + + # Use threading.Thread instead of ThreadPoolExecutor for simpler lifecycle + import threading + thread = threading.Thread(target=self._create_activity_log_safe, args=(log_data,), daemon=True) + thread.start() + def clean_params(self, params): + """Clean sensitive parameters before logging.""" + cleaned = dict(params) + sensitive_keys = ['password', 'passwd', 'pwd', 'secret', 'token', 'api_key', 'apikey'] + for key in list(cleaned.keys()): + if any(sensitive in key.lower() for sensitive in sensitive_keys): + cleaned[key] = '***REDACTED***' + return cleaned + + def _create_activity_log_safe(self, log_data): + """Thread-safe activity log creation with proper DB connection handling.""" + try: + self._create_activity_log(log_data) + except Exception as e: + logger.error(f"Failed to create activity log: {str(e)}", exc_info=True) + finally: + # Close database connection for this thread + connection.close() + 
@transaction.atomic - def _create_activity_log(self, request, user_agent): - params = self.clean_params(request.POST or request.GET or {}) + def _create_activity_log(self, log_data): + from django.contrib.auth import get_user_model + User = get_user_model() + + user = User.objects.get(id=log_data['user_id']) activity_log = ActivityLog( - user=request.user, - service=request.POST.get("s", "unknown"), - method=request.method, - params=json.dumps(params, cls=LazyEncoder), - path=request.path, - ip=get_client_ip(request), - user_agent=json.dumps(user_agent.__dict__ or {}, cls=LazyEncoder), + user=user, + service=log_data['service'], + method=log_data['method'], + params=json.dumps(log_data['params'], cls=LazyEncoder), + path=log_data['path'], + ip=log_data['ip'], + user_agent=json.dumps(log_data['user_agent'].__dict__ or {}, cls=LazyEncoder), ) activity_log.save() diff --git a/main/web/views/content/smppccm.py b/main/web/views/content/smppccm.py index 133e69a..fb12596 100644 --- a/main/web/views/content/smppccm.py +++ b/main/web/views/content/smppccm.py @@ -1,3 +1,5 @@ +import time + from django.utils.translation import gettext as _ from django.shortcuts import render from django.http import JsonResponse @@ -81,7 +83,7 @@ def smppccm_view_manage(request): response["message"] = str(_("SMPPCCM stoped successfully!")) elif s == "restart": smppccm.stop(cid=request.POST.get("cid")) - time.sleep(1) + time.sleep(6) response = smppccm.start(cid=request.POST.get("cid")) response["message"] = str(_("SMPPCCM restarted successfully!")) else: diff --git a/pyproject.toml b/pyproject.toml index c4dc014..a511199 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "jasmin-web-panel" -version = "3.0.1" +version = "3.0.3" description = "A web panel for Jasmin SMS Gateway" readme = "README.md" requires-python = ">=3.11" From 48630b68ff146cf975909a3af9c60c32d2359d14 Mon Sep 17 00:00:00 2001 From: 101t Date: Sat, 1 Nov 2025 13:00:21 +0300 Subject: [PATCH 6/6] * docker-compose.yaml updated * rabbitmq.conf custom configurations added --- docker-compose.yml | 50 +++++++++++++++++++++++++++++++++------------- rabbitmq.conf | 22 ++++++++++++++++++++ 2 files changed, 58 insertions(+), 14 deletions(-) create mode 100644 rabbitmq.conf diff --git a/docker-compose.yml b/docker-compose.yml index 5bf74f7..dd6553c 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -2,7 +2,7 @@ version: '3.8' services: jasmin-web: - image: tarekaec/jasmin_web_panel:1.3 + image: tarekaec/jasmin_web_panel:1.4.2 ports: - "${JASMIN_WEB_PORT:-8999}:8000" deploy: @@ -22,11 +22,11 @@ services: depends_on: - redis - db - - rabbit-mq + - rabbitmq restart: unless-stopped jasmin-celery: - image: tarekaec/jasmin_web_panel:1.3 + image: tarekaec/jasmin_web_panel:1.4.2 entrypoint: bash ./docker-entrypoint-celery.sh deploy: replicas: 1 @@ -35,8 +35,6 @@ services: environment: DEBUG: 0 DJANGO_SETTINGS_MODULE: config.settings.pro - CELERY_BROKER_URL: redis://redis:6379/0 - CELERY_RESULT_BACKEND: redis://redis:6379/0 CELERY_LOG_LEVEL: info healthcheck: disable: true @@ -70,19 +68,25 @@ services: security_opt: - no-new-privileges:true - rabbit-mq: + rabbitmq: image: rabbitmq:3.10-management-alpine restart: unless-stopped environment: RABBITMQ_DEFAULT_USER: guest RABBITMQ_DEFAULT_PASS: guest + #RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS: "-ssl_opts.server_name_indication disable" + volumes: + - ./rabbitmq.conf:/etc/rabbitmq/rabbitmq.conf + - rabbitmq_data:/var/lib/rabbitmq healthcheck: test: 
rabbitmq-diagnostics -q ping deploy: resources: limits: - cpus: "${RABBITMQ_CPU:-2}" - memory: ${RABBITMQ_MEM:-512M} + cpus: "${RABBITMQ_CPU:-4}" + memory: ${RABBITMQ_MEM:-1024M} + reservations: + memory: 512M security_opt: - no-new-privileges:true @@ -108,17 +112,22 @@ services: - ./jasmin_config/resource:/etc/jasmin/resource - ./jasmin_config/store:/etc/jasmin/store - ./jasmin_config:/etc/jasmin + - ./logs:/var/log/jasmin ports: - "${JASMIN_SMS_PORT:-2775}:2775" - "${JASMIN_DASHBOARD_PORT:-8990}:8990" - "${JASMIN_HTTP_API_PORT:-1401}:1401" depends_on: - redis - - rabbit-mq + - rabbitmq environment: REDIS_CLIENT_HOST: redis - AMQP_BROKER_HOST: rabbit-mq + AMQP_BROKER_HOST: rabbitmq AMQP_BROKER_PORT: 5672 + sysctls: + - net.ipv4.tcp_keepalive_time=60 + - net.ipv4.tcp_keepalive_intvl=10 + - net.ipv4.tcp_keepalive_probes=5 deploy: resources: limits: @@ -136,21 +145,32 @@ services: command: bash -c "sleep 15 && pip install -U pip psycopg2-binary mysql-connector-python && exec python /build/misc/scripts/sms_logger.py" environment: DB_TYPE_MYSQL: ${DB_TYPE_MYSQL:-0} - AMQP_BROKER_HOST: ${AMQP_BROKER_HOST:-rabbit-mq} + AMQP_BROKER_HOST: ${AMQP_BROKER_HOST:-rabbitmq} AMQP_BROKER_PORT: ${AMQP_BROKER_PORT:-5672} AMQP_SPEC_FILE: '/etc/jasmin/resource/amqp0-9-1.xml' + AMQP_HEARTBEAT: 60 # Enable 60-second heartbeats + RECONNECT_DELAY: 5 # Initial reconnect delay + MAX_RECONNECT_DELAY: 60 # Max reconnect delay DB_HOST: ${DB_HOST:-db} DB_DATABASE: ${DB_DATABASE:-jasmin} DB_TABLE: ${DB_TABLE:-submit_log} DB_USER: ${DB_USER:-jasmin} DB_PASS: ${DB_PASS:-jasmin} depends_on: - - rabbit-mq + - rabbitmq - db - jasmin restart: unless-stopped healthcheck: - disable: true + test: ["CMD", "pgrep", "-f", "sms_logger.py"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + sysctls: + net.ipv4.tcp_keepalive_time: 60 + net.ipv4.tcp_keepalive_intvl: 10 + net.ipv4.tcp_keepalive_probes: 5 volumes: web_public: @@ -159,7 +179,9 @@ volumes: driver: local redis_data: driver: local + rabbitmq_data: + driver: local postgres_data: driver: local monitoring_data: - driver: local \ No newline at end of file + driver: local diff --git a/rabbitmq.conf new file mode 100644 index 0000000..572c07b --- /dev/null +++ b/rabbitmq.conf @@ -0,0 +1,22 @@ +# RabbitMQ Configuration + +# Enable heartbeat on server side (60 seconds) +heartbeat = 60 + +# TCP connection settings +tcp_listen_options.backlog = 128 +tcp_listen_options.nodelay = true +tcp_listen_options.keepalive = true + +# Consumer timeout (detect stalled consumers) +# Set to 2 hours (7,200,000 ms) - adjust based on your needs +consumer_timeout = 7200000 + +# Memory and disk thresholds +vm_memory_high_watermark.relative = 0.6 +disk_free_limit.absolute = 2GB + +# Logging +log.console = true +log.console.level = info +log.file.level = info
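The `AMQP_HEARTBEAT`, `RECONNECT_DELAY`, and `MAX_RECONNECT_DELAY` variables passed to the `sms_logger` service pair with the `heartbeat = 60` setting in `rabbitmq.conf` above. A minimal sketch of the capped exponential-backoff reconnect loop those knobs imply follows — illustrative only: the client library (`pika`) and the queue name are editorial assumptions, not necessarily what `sms_logger.py` actually uses.

```python
# Illustrative consumer showing how AMQP_HEARTBEAT / RECONNECT_DELAY /
# MAX_RECONNECT_DELAY could drive a reconnect loop. pika and the queue
# name are assumptions; the real sms_logger.py may use a different client.
import os
import time

import pika

HOST = os.environ.get("AMQP_BROKER_HOST", "rabbitmq")
PORT = int(os.environ.get("AMQP_BROKER_PORT", 5672))
HEARTBEAT = int(os.environ.get("AMQP_HEARTBEAT", 60))
RECONNECT_DELAY = int(os.environ.get("RECONNECT_DELAY", 5))
MAX_RECONNECT_DELAY = int(os.environ.get("MAX_RECONNECT_DELAY", 60))
QUEUE = "sms_logger"  # hypothetical queue name


def consume_forever(on_message):
    delay = RECONNECT_DELAY
    while True:
        try:
            connection = pika.BlockingConnection(pika.ConnectionParameters(
                host=HOST, port=PORT,
                heartbeat=HEARTBEAT,  # matches `heartbeat = 60` in rabbitmq.conf
            ))
            channel = connection.channel()
            channel.basic_consume(queue=QUEUE,
                                  on_message_callback=on_message,
                                  auto_ack=False)
            delay = RECONNECT_DELAY  # reset backoff after a successful connect
            channel.start_consuming()
        except pika.exceptions.AMQPConnectionError:
            time.sleep(delay)
            delay = min(delay * 2, MAX_RECONNECT_DELAY)  # capped exponential backoff
```

The TCP keepalive sysctls set on the containers serve the same goal one layer down: dead peers are detected by the OS even when an application-level heartbeat is missed.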