diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 94c622eed67971093d22f92882d484429a8d5f89..cc001254e8ea3c8303a2dbc100bf31ee2d961836 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -70,11 +70,14 @@ publish: stage: publish before_script: - *cd-switch-environment - - IMAGE=tbcare/backend:$ENVIRONMENT + - IMAGE_BACKEND=tbcare/backend:$ENVIRONMENT + - IMAGE_LOCUST=tbcare/locust:$ENVIRONMENT - echo $CI_REGISTRY_PASSWORD | docker login -u $CI_REGISTRY_USER --password-stdin script: - - docker build --tag $IMAGE . - - docker push $IMAGE + - docker build --tag $IMAGE_BACKEND . + - docker push $IMAGE_BACKEND + - docker build --tag $IMAGE_LOCUST ./monitoring/locust + - docker push $IMAGE_LOCUST deploy: extends: .cd-job-template @@ -95,28 +98,6 @@ deploy: git checkout $CI_COMMIT_REF_NAME && git merge --ff && docker-compose pull && - docker-compose up -d && + docker-compose -f docker-compose.yml up -d && exit " - -load-test: - image: python:3.6.5 - stage: performance-test - only: - - schedules - allow_failure: true - before_script: - - cd locust - - pip install -r requirements.txt - script: - - ls -al - - locust -f locust.py --headless -u 100 -r 10 -t 10m -H https://tbcare-be-staging.cs.ui.ac.id --csv=load_test --only-summary - - ls -al - after_script: - - cd locust - - ls -al - - tar czvf load_test_result.tar.gz *.csv - artifacts: - paths: - - locust/*.csv - - locust/load_test_result.tar.gz \ No newline at end of file diff --git a/apps/exportables/tests/test_units/test_exportables.py b/apps/exportables/tests/test_units/test_exportables.py index e3b9e05f7ff5934500ea9d911b8bed08ba8d0f8e..8207861c87d3a6e11ea255d7a125262794e1074a 100644 --- a/apps/exportables/tests/test_units/test_exportables.py +++ b/apps/exportables/tests/test_units/test_exportables.py @@ -134,9 +134,9 @@ class ExportableViewTest(APITestCase): response_data = json.loads(response.content) self.assertEqual(response_data["total_count"], 5) - def test_filter_exportable_empty_return_values(self): - start_date = 
datetime.now(tz=pytz.timezone(settings.TIME_ZONE)) + timedelta(days=1) - end_date = datetime.now(tz=pytz.timezone(settings.TIME_ZONE)) + timedelta(days=2) + def test_filter_exportable_empty_return_values(self): # TODO: H+1 to H+2 causes this test to fail. Need attention + start_date = datetime.now(tz=pytz.timezone(settings.TIME_ZONE)) + timedelta(days=2) + end_date = datetime.now(tz=pytz.timezone(settings.TIME_ZONE)) + timedelta(days=3) url = "/exportables/?start_date={}&end_date={}".format( start_date.isoformat()[0:10], end_date.isoformat()[0:10]) response = self.client.get(url) diff --git a/docker-compose.yml b/docker-compose.yml index 0945504a579d8743ca944003ab3394b8600a0f5a..74913a823e3a9dcf41b37b11870de3d0a63995d6 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -5,6 +5,9 @@ services: image: nginx:1.19-alpine ports: - '${PORT:-8000}:80' + networks: + - backend + - monitoring volumes: - ./nginx.conf:/etc/nginx/conf.d/default.conf - staticfiles:/var/www/html/static/ @@ -15,6 +18,8 @@ services: image: tbcare/backend:${ENVIRONMENT:-staging} depends_on: - db + networks: + - backend volumes: - staticfiles:/srv/staticfiles environment: @@ -35,11 +40,64 @@ services: image: postgres:13.2-alpine volumes: - postgres_data:/var/lib/postgresql/data/ + networks: + - backend environment: - POSTGRES_DB=${DATABASE_NAME} - POSTGRES_USER=${DATABASE_USER} - POSTGRES_PASSWORD=${DATABASE_PASSWORD} + + locust: + image: tbcare/locust:development + container_name: locust + ports: + - '8089:8089' + networks: + - monitoring + volumes: + - ./monitoring/locust:/mnt/locust + command: -f /mnt/locust/locust.py -H http://nginx:80 + + locust-exporter: + image: containersol/locust_exporter + container_name: locustexporter + networks: + - monitoring + environment: + - LOCUST_EXPORTER_URI=http://locust:8089 + depends_on: + - locust + + prometheus: + image: prom/prometheus + container_name: prometheus + networks: + - monitoring + volumes: + - ./monitoring/prometheus:/etc/prometheus + 
depends_on: + - locust-exporter + + grafana: + image: grafana/grafana + container_name: grafana + ports: + - '3000:3000' + networks: + - monitoring + volumes: + - grafana-storage:/var/lib/grafana + - ./monitoring/grafana/grafana.ini:/etc/grafana/grafana.ini + depends_on: + - prometheus + +networks: + monitoring: + driver: bridge + backend: + driver: bridge volumes: postgres_data: staticfiles: + grafana-storage: diff --git a/monitoring/README.md b/monitoring/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0017803aeb8dc3a885e54b59575f30190f3fc8b7 --- /dev/null +++ b/monitoring/README.md @@ -0,0 +1,29 @@ +# Monitoring and Load Testing Modules + +This folder contains all configurations for monitoring and alerting tools used by TBCare. Traffic is generated using containerized Locust into the application backend. Metrics generated is exported using metric exporter provided by container solution (containersol/locust-exporter). Metric ingestion and processing is done using Prometheus Monitoring. Finally, load testing results are visualized using Grafana, an open source analytics and monitoring solution that can be customized to show KPIs effectively in real time. + +![](./diagram.png) + +## Setting Up Monitoring System + +All containers can be created automatically using docker compose using the docker-compose.yml file provided in the root folder. Simply run ```docker-compose up -d``` + +After all containers are up, you may need to manually configure your Grafana data visualizer and create your own dashboard. Our current configuration file does not include any dashboard, though we recommend using [this dashboard](https://github.com/ContainerSolutions/locust_exporter/blob/main/locust_dashboard.json) provided by ContainerSolutions: +- Access Grafana dashboard (see the URL format below) +- Perform authentication according to [this Grafana guide](https://grafana.com/docs/grafana/latest/getting-started/getting-started/). 
+- Configure Prometheus Data Source by navigating to `Settings > Data Source` and clicking Add Data Source. +- In the list of data sources, choose Prometheus. +- Set the name of your new data source as `Prometheus` and URL as `prometheus:9090`. This corresponds to the name of your Prometheus Monitoring container. +- Click "Save & Test" and confirm that Grafana finds the data source to be working. +- Create your first dashboard by navigating to `Create > Import`. +- Upload your preferred dashboard configuration by uploading it via the "Upload JSON File" button or by pasting the file content inside the textbox. +- Click `Load`. +- You should be able to see your dashboard starting to fetch your monitoring data immediately. + +## Accessing Monitoring Tools + +Both the load generator and the data visualization can be accessed publicly, assuming that the nginx container is up: +- Locust: https://DOMAIN/locust +- Grafana: https://DOMAIN/monitoring + +For example, to access Locust from the staging area and start a new load testing job, go to https://tbcare-staging.cs.ui.ac.id/locust. You can see the testing results in real time using https://tbcare-staging.cs.ui.ac.id/monitoring. 
diff --git a/monitoring/diagram.png b/monitoring/diagram.png new file mode 100644 index 0000000000000000000000000000000000000000..2ebbac4c3a89b4eb14a846fde7a370289644ba3a Binary files /dev/null and b/monitoring/diagram.png differ diff --git a/monitoring/grafana/grafana.ini b/monitoring/grafana/grafana.ini new file mode 100644 index 0000000000000000000000000000000000000000..217afe55b097ce9a9f1305dc9da055430dd205e9 --- /dev/null +++ b/monitoring/grafana/grafana.ini @@ -0,0 +1,3 @@ +[server] +root_url = %(protocol)s://%(domain)s:%(http_port)s/monitoring/ +serve_from_sub_path = true diff --git a/monitoring/locust/Dockerfile b/monitoring/locust/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..023f34938e9b189427282f705ba840ee69e7990a --- /dev/null +++ b/monitoring/locust/Dockerfile @@ -0,0 +1,3 @@ +FROM locustio/locust +ADD ./requirements.txt ./requirements.txt +RUN pip3 install -r requirements.txt diff --git a/locust/__init__.py b/monitoring/locust/__init__.py similarity index 100% rename from locust/__init__.py rename to monitoring/locust/__init__.py diff --git a/locust/constants.py b/monitoring/locust/constants.py similarity index 100% rename from locust/constants.py rename to monitoring/locust/constants.py diff --git a/locust/locust.py b/monitoring/locust/locust.py similarity index 96% rename from locust/locust.py rename to monitoring/locust/locust.py index f2411b23027cc7d67ad38f89fbece39c16c64e5f..6f701378602d1091947581cf259422abb864683d 100644 --- a/locust/locust.py +++ b/monitoring/locust/locust.py @@ -24,10 +24,10 @@ class WebUser(HttpUser): end_date = self.faker.date_between(start_date=start_date, end_date='-1d') params = { - 'start-date': start_date, - 'end-date': end_date + 'start_date': start_date, + 'end_date': end_date } - self.client.get('/exportables/', headers=self._get_header(), params=params) + self.client.get('/exportables/', headers=self._get_header(), params=params, 
name='/exportables/?start_date=[startDate]&end_date=[endDate]') @task def get_case_subjects(self): diff --git a/locust/requirements.txt b/monitoring/locust/requirements.txt similarity index 68% rename from locust/requirements.txt rename to monitoring/locust/requirements.txt index ce0b14cdf9f2ee387bf36ebc57ab0de4cb3e574c..c19aab17e62515813ae8534677e90a8c661b2a97 100644 --- a/locust/requirements.txt +++ b/monitoring/locust/requirements.txt @@ -1,2 +1 @@ Faker==4.0.1 -locust \ No newline at end of file diff --git a/monitoring/prometheus/prometheus.yml b/monitoring/prometheus/prometheus.yml new file mode 100644 index 0000000000000000000000000000000000000000..3a6c6d0dc2f487056f6de9880d3d93341989aa7d --- /dev/null +++ b/monitoring/prometheus/prometheus.yml @@ -0,0 +1,30 @@ +# my global config +global: + scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. + evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. + # scrape_timeout is set to the global default (10s). + +# Alertmanager configuration +alerting: + alertmanagers: + - static_configs: + - targets: + # - alertmanager:9093 + +# Load rules once and periodically evaluate them according to the global 'evaluation_interval'. +rule_files: + # - "first_rules.yml" + # - "second_rules.yml" + +# A scrape configuration containing exactly one endpoint to scrape: +# Here it's Prometheus itself. +scrape_configs: + # The job name is added as a label `job=` to any timeseries scraped from this config. + - job_name: 'prometheus' + scrape_interval: 2s + + # metrics_path defaults to '/metrics' + # scheme defaults to 'http'. 
+ + static_configs: + - targets: ['locustexporter:9646'] diff --git a/nginx.conf b/nginx.conf index ec3555147022913b7446a1574a37f7dde6dbc8a1..e5bfba39f34c4aed3ad6623eae4d0ec87f0d42d4 100644 --- a/nginx.conf +++ b/nginx.conf @@ -5,6 +5,28 @@ server { root /var/www/html/; } + location /locust/ { + server_name_in_redirect off; + rewrite /locust(.*) $1 break; + proxy_redirect off; + proxy_pass http://locust:8089; + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /monitoring/ { + server_name_in_redirect off; + rewrite /monitoring(.*) $1 break; + proxy_redirect off; + proxy_pass http://grafana:3000; + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + location / { proxy_pass http://app:8000; proxy_set_header Host $http_host; @@ -15,3 +37,4 @@ server { client_max_body_size 2M; } +