#Ignore the logs directory
logs/
#Ignoring the password file
passwords.txt
#Ignoring git and cache folders
.git
.cache
.gitignore
.gitlab-ci.yml
variables.yml
#Ignoring all the markdown and class files
*.md
**/*.class
.env
__pycache__
*.pyc
*.pyo
*.pyd
.Python
.env
pip-log.txt
pip-delete-this-directory.txt
.tox
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.log
MONGO_URI=mongodb://192.168.0.220:2717/
APP_NAME=ilens_scheduler
SCHEDULER_THREAD=120
SCHEDULER_PROCESS=20
MAX_INSTANCE=200
MISFIRE_SEC=180
BASE_PATH=/data
MOUNT_DIR=/ilens_scheduler
REDIS_URI=redis://192.168.0.220:6379
SECURE_ACCESS=True
SW_DOCS_URL=/docs
SW_OPENAPI_URL=/openapi.json
ENABLE_CORS=True
CORS_URLS=staging.ilens.io
SECURE_COOKIE=True
VERIFY_SIGNATURE=True
PROTECTED_HOSTS="*.unifytwin.com,*.ilens.io"
PORT=1234
# Created by .ignore support plugin (hsz.mobi)
### JetBrains template
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
reports/*.pdf
reports/*.csv
reports/*.xlsx
# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf
.idea
logs
# Generated files
.idea/**/contentModel.xml
# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml
# Gradle
.idea/**/gradle.xml
.idea/**/libraries
# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml
# .idea/*.iml
# .idea/modules
# *.iml
# *.ipr
# CMake
cmake-build-*/
# Mongo Explorer plugin
.idea/**/mongoSettings.xml
# File-based project format
*.iws
# IntelliJ
out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
# Editor-based Rest Client
.idea/httpRequests
# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser
### Python template
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
### VisualStudio template
## Ignore Visual Studio temporary files, build results, and
## files generated by popular Visual Studio add-ons.
##
## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
# User-specific files
*.rsuser
*.suo
*.user
*.userosscache
*.sln.docstates
# User-specific files (MonoDevelop/Xamarin Studio)
*.userprefs
# Mono auto generated files
mono_crash.*
# Build results
[Dd]ebug/
[Dd]ebugPublic/
[Rr]elease/
[Rr]eleases/
x64/
x86/
[Ww][Ii][Nn]32/
[Aa][Rr][Mm]/
[Aa][Rr][Mm]64/
bld/
[Bb]in/
[Oo]bj/
[Ll]og/
[Ll]ogs/
# Visual Studio 2015/2017 cache/options directory
.vs/
# Uncomment if you have tasks that create the project's static files in wwwroot
#wwwroot/
# Visual Studio 2017 auto generated files
Generated\ Files/
# MSTest test Results
[Tt]est[Rr]esult*/
[Bb]uild[Ll]og.*
# NUnit
*.VisualState.xml
TestResult.xml
nunit-*.xml
# Build Results of an ATL Project
[Dd]ebugPS/
[Rr]eleasePS/
dlldata.c
# Benchmark Results
BenchmarkDotNet.Artifacts/
# .NET Core
project.lock.json
project.fragment.lock.json
artifacts/
# ASP.NET Scaffolding
ScaffoldingReadMe.txt
# StyleCop
StyleCopReport.xml
# Files built by Visual Studio
*_i.c
*_p.c
*_h.h
*.ilk
*.meta
*.obj
*.iobj
*.pch
*.pdb
*.ipdb
*.pgc
*.pgd
*.rsp
*.sbr
*.tlb
*.tli
*.tlh
*.tmp
*.tmp_proj
*_wpftmp.csproj
*.log
*.vspscc
*.vssscc
.builds
*.pidb
*.svclog
*.scc
# Chutzpah Test files
_Chutzpah*
# Visual C++ cache files
ipch/
*.aps
*.ncb
*.opendb
*.opensdf
*.sdf
*.cachefile
*.VC.db
*.VC.VC.opendb
# Visual Studio profiler
*.psess
*.vsp
*.vspx
*.sap
# Visual Studio Trace Files
*.e2e
# TFS 2012 Local Workspace
$tf/
# Guidance Automation Toolkit
*.gpState
# ReSharper is a .NET coding add-in
_ReSharper*/
*.[Rr]e[Ss]harper
*.DotSettings.user
# TeamCity is a build add-in
_TeamCity*
# DotCover is a Code Coverage Tool
*.dotCover
# AxoCover is a Code Coverage Tool
.axoCover/*
!.axoCover/settings.json
# Coverlet is a free, cross platform Code Coverage Tool
coverage*.json
coverage*.xml
coverage*.info
# Visual Studio code coverage results
*.coverage
*.coveragexml
# NCrunch
_NCrunch_*
.*crunch*.local.xml
nCrunchTemp_*
# MightyMoose
*.mm.*
AutoTest.Net/
# Web workbench (sass)
.sass-cache/
# Installshield output folder
[Ee]xpress/
# DocProject is a documentation generator add-in
DocProject/buildhelp/
DocProject/Help/*.HxT
DocProject/Help/*.HxC
DocProject/Help/*.hhc
DocProject/Help/*.hhk
DocProject/Help/*.hhp
DocProject/Help/Html2
DocProject/Help/html
# Click-Once directory
publish/
# Publish Web Output
*.[Pp]ublish.xml
*.azurePubxml
# Note: Comment the next line if you want to checkin your web deploy settings,
# but database connection strings (with potential passwords) will be unencrypted
*.pubxml
*.publishproj
# Microsoft Azure Web App publish settings. Comment the next line if you want to
# checkin your Azure Web App publish settings, but sensitive information contained
# in these scripts will be unencrypted
PublishScripts/
# NuGet Packages
*.nupkg
# NuGet Symbol Packages
*.snupkg
# The packages folder can be ignored because of Package Restore
**/[Pp]ackages/*
# except build/, which is used as an MSBuild target.
!**/[Pp]ackages/build/
# Uncomment if necessary however generally it will be regenerated when needed
#!**/[Pp]ackages/repositories.config
# NuGet v3's project.json files produces more ignorable files
*.nuget.props
*.nuget.targets
# Microsoft Azure Build Output
csx/
*.build.csdef
# Microsoft Azure Emulator
ecf/
rcf/
# Windows Store app package directories and files
AppPackages/
BundleArtifacts/
Package.StoreAssociation.xml
_pkginfo.txt
*.appx
*.appxbundle
*.appxupload
# Visual Studio cache files
# files ending in .cache can be ignored
*.[Cc]ache
# but keep track of directories ending in .cache
!?*.[Cc]ache/
# Others
ClientBin/
~$*
*~
*.dbmdl
*.dbproj.schemaview
*.jfm
*.pfx
*.publishsettings
orleans.codegen.cs
# Including strong name files can present a security risk
# (https://github.com/github/gitignore/pull/2483#issue-259490424)
#*.snk
# Since there are multiple workflows, uncomment next line to ignore bower_components
# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
#bower_components/
# RIA/Silverlight projects
Generated_Code/
# Backup & report files from converting an old project file
# to a newer Visual Studio version. Backup files are not needed,
# because we have git ;-)
_UpgradeReport_Files/
Backup*/
UpgradeLog*.XML
UpgradeLog*.htm
ServiceFabricBackup/
*.rptproj.bak
# SQL Server files
*.mdf
*.ldf
*.ndf
# Business Intelligence projects
*.rdl.data
*.bim.layout
*.bim_*.settings
*.rptproj.rsuser
*- [Bb]ackup.rdl
*- [Bb]ackup ([0-9]).rdl
*- [Bb]ackup ([0-9][0-9]).rdl
# Microsoft Fakes
FakesAssemblies/
# GhostDoc plugin setting file
*.GhostDoc.xml
# Node.js Tools for Visual Studio
.ntvs_analysis.dat
node_modules/
# Visual Studio 6 build log
*.plg
# Visual Studio 6 workspace options file
*.opt
# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
*.vbw
# Visual Studio LightSwitch build output
**/*.HTMLClient/GeneratedArtifacts
**/*.DesktopClient/GeneratedArtifacts
**/*.DesktopClient/ModelManifest.xml
**/*.Server/GeneratedArtifacts
**/*.Server/ModelManifest.xml
_Pvt_Extensions
# Paket dependency manager
.paket/paket.exe
paket-files/
# FAKE - F# Make
.fake/
# CodeRush personal settings
.cr/personal
# Python Tools for Visual Studio (PTVS)
__pycache__/
*.pyc
# Cake - Uncomment if you are using it
# tools/**
# !tools/packages.config
# Tabs Studio
*.tss
# Telerik's JustMock configuration file
*.jmconfig
# BizTalk build output
*.btp.cs
*.btm.cs
*.odx.cs
*.xsd.cs
# OpenCover UI analysis results
OpenCover/
# Azure Stream Analytics local run output
ASALocalRun/
# MSBuild Binary and Structured Log
*.binlog
# NVidia Nsight GPU debugger configuration file
*.nvuser
# MFractors (Xamarin productivity tool) working folder
.mfractor/
# Local History for Visual Studio
.localhistory/
# BeatPulse healthcheck temp database
healthchecksdb
# Backup folder for Package Reference Convert tool in Visual Studio 2017
MigrationBackup/
# Ionide (cross platform F# VS Code tools) working folder
.ionide/
# Fody - auto-generated XML schema
FodyWeavers.xsd
### JupyterNotebooks template
# gitignore template for Jupyter Notebooks
# website: http://jupyter.org/
.ipynb_checkpoints
*/.ipynb_checkpoints/*
# IPython
profile_default/
ipython_config.py
# Remove previous ipynb_checkpoints
# git rm -r .ipynb_checkpoints/
stages:
- pre-commit
- auto-tagging
- validate
- scan
- deploy
- update
- codequality
variables:
MYSQL_CONNECTION: "mysql -h $ILENS_MYSQL_HOST --port $ILENS_MYSQL_PORT -u $ILENS_MYSQL_USER -p$ILENS_MYSQL_PASSWORD "
HELM_CHART_CLONE_PATH: "/home/gitlab-runner/kubernetes/ilens/qa-helm-charts"
HELM_CHART: "$HELM_CHART_CLONE_PATH/$CI_PROJECT_NAME-$CI_JOB_ID/ilens-core/ilens-modules"
VAR_UPDATION_SCRIPT: /home/gitlab-runner/monitor/qa-helm-repo-update/execute.sh
AUTO_TAGGING_SCRIPT: /home/gitlab-runner/monitor/qa-helm-repo-update/autotagging.sh
K8_DEPLOYMENT_SCRIPT: /home/gitlab-runner/monitor/qa-helm-repo-update/k8-deployment.sh
STATUS_SCRIPT: /home/gitlab-runner/monitor/deployment-status.sh
PRE_COMMIT: /home/gitlab-runner/monitor/pre-commit/lint.sh
QA_HELM_GIT_PATH: https://$GIT_USRNAME:$GIT_USRPASSWD@$HELM_GIT_URL
VARIABLES_YML: variables.yml
TIMEOUT: 960s
DEPLOYMENT_YML: ilens-scheduler.yml
DEPLOYMENT_YML_220: $CI_PROJECT_NAME
before_script:
- val=`echo $($MYSQL_CONNECTION -e "SELECT COUNT(*) FROM $ILENS_VERSION_DB.$DB_TABLE WHERE category='Server' AND type='Service' AND os='docker' AND module_name='$CI_PROJECT_NAME' ") | cut -d " " -f2`
- if [ $val == 0 ]; then $MYSQL_CONNECTION -e "INSERT INTO $ILENS_VERSION_DB.$DB_TABLE values('Server','Service','$CI_PROJECT_NAME','docker', '2', '0', '0', '0')";fi
- QA=$($MYSQL_CONNECTION -N -e "SELECT qa FROM $ILENS_VERSION_DB.$DB_TABLE where module_name = '$CI_PROJECT_NAME' AND type = 'Service' AND category = 'Server' AND os = 'docker'")
- DEV=$($MYSQL_CONNECTION -N -e "SELECT dev FROM $ILENS_VERSION_DB.$DB_TABLE where module_name = '$CI_PROJECT_NAME' AND type = 'Service' AND category = 'Server' AND os = 'docker'")
- PROD=$($MYSQL_CONNECTION -N -e "SELECT prod FROM $ILENS_VERSION_DB.$DB_TABLE where module_name = '$CI_PROJECT_NAME' AND type = 'Service' AND category = 'Server' AND os = 'docker'")
auto-tagging:
stage: auto-tagging
script:
- echo "Initializing auto-tagging..."
- >
if ! sh $AUTO_TAGGING_SCRIPT ; then
echo "Error occured during auto-tagging...!"
exit 1
fi
only:
- master
tags:
- shell
#~~~~~| Requirements.txt version check |~~~~~#
package-version-check:
stage: validate
script:
- REQUIREMENTS=$(cat requirements.txt)
- FAILED=0
- >
for REQ in ${REQUIREMENTS[@]};
do
if [ "${REQ:0:1}" = "#" ]; then continue; fi
PKG=$(echo $REQ | tr = " " | awk '{print $1}')
VER=$(echo $REQ | tr = " " | awk '{print $2}')
VER=${VER//[^[:alnum:]]/}
if [ ! -z "${VER//[0-9]}" ] || [ -z $VER ]; then
echo " Package version not specified for: $PKG "
FAILED=`expr $FAILED + 1`
fi
done
- if [ $FAILED -gt 0 ]; then exit 1; fi
only:
- QA
- develop
tags:
- shell
#~~~~~| Vulnerability Scanner |~~~~~#
vulnerability-scanner:
stage: scan
script:
- QA=`expr $QA + 1` && DEV=0
- DOCKER_IMAGE=$CI_PROJECT_NAME:vulnerability-scan
- docker build -t $DOCKER_IMAGE .
- trivy image --format template --template "@/home/gitlab-runner/image-scanner/templates/html.tpl" -o imageScanner-$CI_PROJECT_NAME.html $DOCKER_IMAGE
- trivy image --format json -o imageScanner-$CI_PROJECT_NAME.json $DOCKER_IMAGE
- docker rmi --force $DOCKER_IMAGE
- mv imageScanner-$CI_PROJECT_NAME.html /data0/email-util/module/reports/
- >
if ! /home/gitlab-runner/image-scanner/severity_check imageScanner-$CI_PROJECT_NAME.json ; then
cd /home/gitlab-runner/image-scanner/
./mail imageScanner-$CI_PROJECT_NAME.html $DOCKER_IMAGE
fi
only:
- QA
- develop
tags:
- shell
#~~~~~| QA K8 |~~~~~#
qa-k8-deployment:
stage: deploy
before_script:
- echo "Initializing the QA K8 deployment..."
- >
if ! sh $VAR_UPDATION_SCRIPT ; then
echo "Error while updating the variables...."
exit 1
fi
script:
- >
if ! sh $K8_DEPLOYMENT_SCRIPT ; then
echo "Error occured during QA K8 deployment...!"
exit 1
fi
after_script:
- rm -rf $HELM_CHART_CLONE_PATH/$CI_PROJECT_NAME-$CI_JOB_ID
only:
- QA
tags:
- shell
tag-update-qa:
stage: update
script:
- DEV=0
- REGISTRY_URL=azacrknowledgelens.azurecr.io/knowledgelens/products/ilens/qa
- docker rmi --force $REGISTRY_URL/$CI_PROJECT_NAME:v$PROD.$QA.$DEV
- QA=`expr $QA + 1` && DEV=0
- $MYSQL_CONNECTION -e "INSERT INTO $ILENS_HISTORY_DB.$DB_TABLE values('$CI_JOB_ID','Server','Service', '$CI_PROJECT_NAME','docker', '$PROD.$QA.$DEV', '$CI_COMMIT_SHA', '$GITLAB_USER_NAME', '$CI_COMMIT_REF_NAME')"
- $MYSQL_CONNECTION -e "UPDATE $ILENS_VERSION_DB.$DB_TABLE SET prod='$PROD' ,qa='$QA', dev='$DEV' WHERE module_name = '$CI_PROJECT_NAME' AND type = 'Service' AND category = 'Server' AND os = 'docker'"
dependencies:
- qa-k8-deployment
only:
- QA
tags:
- shell
#~~~~~| DEV 220 |~~~~~#
dev-deployment-220:
stage: deploy
before_script:
- val=`echo $($MYSQL_CONNECTION -e "SELECT COUNT(*) FROM $ILENS_VERSION_DB.$DB_TABLE WHERE category='Server' AND type='Service' AND os='docker' AND module_name='$CI_PROJECT_NAME' ") | cut -d " " -f2`
- if [ $val == 0 ]; then $MYSQL_CONNECTION -e "INSERT INTO $ILENS_VERSION_DB.$DB_TABLE values('Server','Service','$CI_PROJECT_NAME','docker', '2', '0', '0', '0')";fi
- QA=$($MYSQL_CONNECTION -N -e "SELECT qa FROM $ILENS_VERSION_DB.$DB_TABLE where module_name = '$CI_PROJECT_NAME' AND type = 'Service' AND category = 'Server' AND os = 'docker'")
- DEV=$($MYSQL_CONNECTION -N -e "SELECT dev FROM $ILENS_VERSION_DB.$DB_TABLE where module_name = '$CI_PROJECT_NAME' AND type = 'Service' AND category = 'Server' AND os = 'docker'")
- PROD=$($MYSQL_CONNECTION -N -e "SELECT prod FROM $ILENS_VERSION_DB.$DB_TABLE where module_name = '$CI_PROJECT_NAME' AND type = 'Service' AND category = 'Server' AND os = 'docker'")
script:
- REGISTRY_URL=azacrknowledgelens.azurecr.io/knowledgelens/products/ilens/dev
- DEV=`expr $DEV + 1`
- IMAGE_URL=$REGISTRY_URL/$CI_PROJECT_NAME:v$PROD.$QA.$DEV
- echo $IMAGE_URL
- DOCKER_COMPOSE=/opt/services/compose/$CI_COMMIT_BRANCH/docker-compose.yml
- VARIABLE_ENV_FILE="dev-variables.env"
#- CONTAINER_NAME_1=($DEPLOYMENT_YML_220)
#- CONTAINER_NAME=(${CONTAINER_NAME_1[@]%.*})
- DEV_PROJECT_NAME=$(echo $CI_PROJECT_NAME | cut -f "1" -d ".")
- echo $DEV_PROJECT_NAME
- tar czvf $CI_PROJECT_NAME.tar.gz *
- echo "Deploying to the dev 220 server..."
- sshpass -p $OFC_PASSWD ssh $OFC_USERNAME@$OFC_HOSTNAME "mkdir -p /tmp/$CI_PROJECT_NAME/tar/"
- sshpass -p $OFC_PASSWD ssh $OFC_USERNAME@$OFC_HOSTNAME "mkdir -p /tmp/$CI_PROJECT_NAME/untar/"
- sshpass -p $OFC_PASSWD scp $CI_PROJECT_NAME.tar.gz $OFC_USERNAME@$OFC_HOSTNAME:/tmp/$CI_PROJECT_NAME/tar/
- sshpass -p $OFC_PASSWD ssh $OFC_USERNAME@$OFC_HOSTNAME "tar xzvf /tmp/$CI_PROJECT_NAME/tar/$CI_PROJECT_NAME.tar.gz -C /tmp/$CI_PROJECT_NAME/untar/"
- sshpass -p $OFC_PASSWD ssh $OFC_USERNAME@$OFC_HOSTNAME "docker build -t $IMAGE_URL /tmp/$CI_PROJECT_NAME/untar/."
- OLD_IMAGE=$(sshpass -p $OFC_PASSWD ssh $OFC_USERNAME@$OFC_HOSTNAME "cat $DOCKER_COMPOSE | grep '&$DEV_PROJECT_NAME-image' | cut -f '3' -d ' ' ")
- echo "Current image":" $OLD_IMAGE"
- echo "New image":" $IMAGE_URL"
- sshpass -p $OFC_PASSWD scp $VARIABLE_ENV_FILE $OFC_USERNAME@$OFC_HOSTNAME:/opt/services/compose/develop/variables_env/$DEV_PROJECT_NAME.env
- sshpass -p $OFC_PASSWD ssh $OFC_USERNAME@$OFC_HOSTNAME "sed -i 's|'$OLD_IMAGE'|'$IMAGE_URL'|1' '$DOCKER_COMPOSE'"
- sshpass -p $OFC_PASSWD ssh $OFC_USERNAME@$OFC_HOSTNAME "docker-compose -f $DOCKER_COMPOSE up -d ${DEPLOYMENT_YML_220[@]}"
after_script:
- sshpass -p $OFC_PASSWD ssh $OFC_USERNAME@$OFC_HOSTNAME "rm -rf /tmp/$CI_PROJECT_NAME"
- rm -f $CI_PROJECT_NAME.tar.gz
- val=`echo $($MYSQL_CONNECTION -e "SELECT COUNT(*) FROM $ILENS_VERSION_DB.$DB_TABLE WHERE category='Server' AND type='Service' AND os='docker' AND module_name='$CI_PROJECT_NAME' ") | cut -d " " -f2`
- if [ $val == 0 ]; then $MYSQL_CONNECTION -e "INSERT INTO $ILENS_VERSION_DB.$DB_TABLE values('Server','Service','$CI_PROJECT_NAME','docker', '2', '0', '0', '0')";fi
- QA=$($MYSQL_CONNECTION -N -e "SELECT qa FROM $ILENS_VERSION_DB.$DB_TABLE where module_name = '$CI_PROJECT_NAME' AND type = 'Service' AND category = 'Server' AND os = 'docker'")
- DEV=$($MYSQL_CONNECTION -N -e "SELECT dev FROM $ILENS_VERSION_DB.$DB_TABLE where module_name = '$CI_PROJECT_NAME' AND type = 'Service' AND category = 'Server' AND os = 'docker'")
- PROD=$($MYSQL_CONNECTION -N -e "SELECT prod FROM $ILENS_VERSION_DB.$DB_TABLE where module_name = '$CI_PROJECT_NAME' AND type = 'Service' AND category = 'Server' AND os = 'docker'")
- DEV=`expr $DEV + 1`
- $MYSQL_CONNECTION -e "INSERT INTO $ILENS_HISTORY_DB.$DB_TABLE values('$CI_JOB_ID','Server','Service', '$CI_PROJECT_NAME','docker', '$PROD.$QA.$DEV', '$CI_COMMIT_SHA', '$GITLAB_USER_NAME', '$CI_COMMIT_REF_NAME')"
- $MYSQL_CONNECTION -e "UPDATE $ILENS_VERSION_DB.$DB_TABLE SET prod='$PROD' ,qa='$QA', dev='$DEV' WHERE module_name = '$CI_PROJECT_NAME' AND type = 'Service' AND category = 'Server' AND os = 'docker'"
only:
- develop
tags:
- shell
#~~~~~| CODE QUALITY |~~~~~#
codequality:
stage: codequality
image: $SONAR_SCANNER_IMAGE
script:
- /opt/sonar-scanner/bin/sonar-scanner -Dsonar.projectKey=$CI_PROJECT_NAME -Dsonar.sources=. -Dsonar.host.url=$SONAR_HOST -Dsonar.login=$SONAR_TOKEN
- sleep 5
- python3 /opt/code_quality_report/static_code_quality_report_csv_v2.py $CI_PROJECT_NAME $GITLAB_USER_EMAIL,$EMAIL_TO $EMAIL_FROM $EMAIL_PASSWD False admin $SONAR_PASSWD
only:
- develop
tags:
- docker
#~~~~~| CODE PRECOMMIT |~~~~~#
pre-commit:
stage: pre-commit
script:
- sh $PRE_COMMIT
only:
refs:
- merge_requests
variables:
- $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "master"
- $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "QA"
- $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop"
tags:
- shell
#!/usr/bin/env bash
pip install ruff black isort --upgrade
ruff scripts
black scripts --check
isort scripts --check-only
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.3.0
hooks:
- id: end-of-file-fixer
- id: trailing-whitespace
- id: requirements-txt-fixer
- repo: https://github.com/omnilib/ufmt
rev: v2.0.0
hooks:
- id: ufmt
additional_dependencies:
- black == 22.6.0
- usort == 1.0.4
- repo: https://github.com/PyCQA/flake8
rev: 5.0.4
hooks:
- id: flake8
args:
- "--max-line-length=120"
- "--max-complexity=20"
- "--select=B,C,E,F,W,T4,B9"
# these are errors that will be ignored by flake8
# check out their meaning here
# https://flake8.pycqa.org/en/latest/user/error-codes.html
- "--ignore=E203,E266,E501,W503,F403,F401,E402"
FROM tiangolo/uvicorn-gunicorn-fastapi:python3.9-slim
COPY requirements.txt /app/requirements.txt
WORKDIR /app
RUN pip install -r requirements.txt
RUN apt update && apt install curl -y
COPY . /app
# iLens - DevOps
### GitFlow
Below is the list of branches for which CI/CD is configured.
### Branches
| Branch | Description | URL |
| --------- | ------------------------------------------------------------------------- | -------------------------------------------------------------- |
| `master` | Tags and releases the new version. |-|
| `QA` | Deploys to the non-production testing environment (AKS). | https://qa.ilens.io/ |
| `develop` | Deploys to the 220 server and the dev Kubernetes cluster. | http://192.168.0.220/dev_master/ and http://192.168.0.236/dev_master/ |
| `feature/<feature_name>` | Holds the code base for a feature update. |-|
| `patch/<patch_name>` | Holds the code base for a patch update. |-|
- There are two environments for the development team: a self-hosted Kubernetes cluster (http://192.168.0.236/dev_master/) and a server where the source code is deployed directly (http://192.168.0.220/dev_master/).
- The QA environment is an AKS cluster: https://qa.ilens.io/
- Production environments are all client environments.
### Merge Requests
1. When a merge request targeting the `develop` or `QA` branch is merged, a pipeline is triggered to deploy to the respective environment.
1. For a feature update, create a new branch named **`feature/<feature_name>`** from the `master` branch. Once development is complete, merge the code back into `master`; auto-tagging runs on that merge.
1. For a patch update, create a new branch named **`patch/<patch_name>`** from the `master` branch. Once development is complete, merge the code back into `master`; auto-tagging runs on that merge.
The same flow is depicted in the diagram below, followed by a sketch of the branch commands:
<img src="https://gitlab-pm.knowledgelens.com/KnowledgeLens/Products/iLens-2.0/core/devops/scripts/-/raw/auto-tagging-ci/Patch-Feature-Flow.png" alt="Merge request"/>
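A minimal sketch of the feature flow described above (the branch name is a placeholder; a patch follows the same steps with a `patch/<patch_name>` branch):

```bash
# Create the feature branch from master
git checkout master && git pull
git checkout -b feature/<feature_name>
# ...develop and commit...
git push -u origin feature/<feature_name>
# Raise a merge request back into master; auto-tagging runs once the merge completes.
```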
### Environment
* [ ] MONGO_URI=mongodb://192.168.0.220:2717
* [ ] APP_NAME=ilens_scheduler
* [ ] SCHEDULER_THREAD=120
* [ ] SCHEDULER_PROCESS=20
* [ ] MAX_INSTANCE=200
* [ ] REDIS_URI=redis://192.168.0.220:6379
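For a local run, the variables above can be supplied through a `.env` file, which the service loads with `python-dotenv` at startup. A minimal sketch, assuming the uvicorn entry point is saved as `app.py`:

```bash
# Install the pinned dependencies and provide the environment via a .env file
pip install -r requirements.txt
cp dev-variables.env .env   # or write the variables listed above into .env
python app.py               # loads .env through python-dotenv, then starts uvicorn
```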
Version: v6.8
Release Note:
Enhancements:
- Fixed code smells.
- Added Redis URI.
if __name__ == '__main__':
from dotenv import load_dotenv
load_dotenv()
import uvicorn
from main import app
from scripts.constants.app_configuration import Service
from scripts.core.engine.scheduler_engine import scheduler
from scripts.logging.logging import logger
service_obj = Service()
scheduler.start()
if __name__ == "__main__":
try:
app.root_path = "ilens-scheduler"
logger.info("Starting the scheduler framework")
logger.info("Scheduler framework started successfully")
uvicorn.run("main:app", host=service_obj.host, port=int(service_obj.port))
except (KeyboardInterrupt, SystemExit):
scheduler.shutdown()
raise
-----BEGIN RSA PRIVATE KEY-----
MIICWwIBAAKBgQClilTaeHq6Zc+kWHCNl1O0btGRm7ct3O5zqWx1mwwLUWH14eft
Hi5wIbOYh79JQ9BO2OA4UjPq31uwmJ96Okl0OULfENhwd/D7P3mnoRlktPT2t+tt
RRrKvx3wNpOy/3nBsXnNt8EKxyA7k9vbqLbv9pGw2hcqOYe/NGTkmm1PswIDAQAB
AoGAZPARR1l5NBkKYGKQ1rU0E+wSmx+AtVVmjF39RUSyNmB8Q+poebwSgsr58IKt
T6Yq6Tjyl0UAZTGmferCK0xJJrqyP0hMn4nNNut+acWMKyt+9YrA2FO+r5Jb9JuT
SK35xXnM4aZLGppgWJxRzctpIz+qkf6oLRSZme0AuiqcwYECQQDY+QDL3wbWplRW
bze0DsZRMkDAkNY5OCydvjte4SR/mmAzsrpNrS5NztWbaaQrefoPbsdYBPbd8rS7
C/s/0L1zAkEAw1EC5zt2STuhkcKLa/tL+bk8WHHHtf19aC9kBj1TvWBFh+JojWCo
86iK5fLcHzhyQx5Qi3E9LG2HvOWhS1iUwQJAKbEHHyWW2c4SLJ2oVXf1UYrXeGkc
UNhjclgobl3StpZCYAy60cwyNo9E6l0NR7FjhG2j7lzd1t4ZLkvqFmQU0wJATLPe
yQIwBLh3Te+xoxlQD+Tvzuf3/v9qpWSfClhBL4jEJYYDeynvj6iry3whd91J+hPI
m8o/tNfay5L+UcGawQJAAtbqQc7qidFq+KQYLnv5gPRYlX/vNM+sWstUAqvWdMze
JYUoTHKgiXnSZ4mizI6/ovsBOMJTb6o1OJCKQtYylw==
-----END RSA PRIVATE KEY-----
-----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQClilTaeHq6Zc+kWHCNl1O0btGR
m7ct3O5zqWx1mwwLUWH14eftHi5wIbOYh79JQ9BO2OA4UjPq31uwmJ96Okl0OULf
ENhwd/D7P3mnoRlktPT2t+ttRRrKvx3wNpOy/3nBsXnNt8EKxyA7k9vbqLbv9pGw
2hcqOYe/NGTkmm1PswIDAQAB
-----END PUBLIC KEY-----
[SERVICE]
port=$PORT
host=0.0.0.0
enable_security=false
allow_cross_origin=true
secure_cookie = $SECURE_COOKIE
[MONGO_DB]
uri = $MONGO_URI
[SCHEDULER]
SCHEDULER_THREAD=$SCHEDULER_THREAD
SCHEDULER_PROCESS=$SCHEDULER_PROCESS
MAX_INSTANCE=$MAX_INSTANCE
MISFIRE_SEC = $MISFIRE_SEC
[PATH]
base_path = $BASE_PATH
mount_dir = $MOUNT_DIR
[REDIS]
REDIS_URI = $REDIS_URI
login_db = 9
user_role_permissions = 21
project_tags_db = 18
if __name__ == '__main__':
from dotenv import load_dotenv
load_dotenv()
import uvicorn
from main import app
from scripts.constants.app_configuration import Service
from scripts.core.engine.scheduler_engine import scheduler
from scripts.logging.logging import logger
service_obj = Service()
scheduler.start()
if __name__ == "__main__":
try:
logger.info("Starting the scheduler framework")
logger.info("Scheduler framework started successfully")
uvicorn.run(app, host=service_obj.host, port=int(service_obj.port))
except (KeyboardInterrupt, SystemExit):
scheduler.shutdown()
raise
MONGO_URI=mongodb://ilens:ilens4321@192.168.0.220:2717/?authSource=admin
APP_NAME=ilens_scheduler
SCHEDULER_THREAD=120
SCHEDULER_PROCESS=20
MAX_INSTANCE=200
MISFIRE_SEC=180
BASE_PATH=/data
MOUNT_DIR=/ilens_scheduler
REDIS_URI=redis://192.168.0.220:6379
SECURE_ACCESS=False
SW_DOCS_URL=/docs
SW_OPENAPI_URL=/openapi.json
ENABLE_CORS=True
CORS_URLS=staging.ilens.io
SECURE_COOKIE=False
VERIFY_SIGNATURE=False
PROTECTED_HOSTS="*.unifytwin.com,*.ilens.io"
PORT=1234
if __name__ == '__main__':
from dotenv import load_dotenv
load_dotenv()
import os
import uvicorn
from fastapi import FastAPI, Depends
from fastapi.middleware.cors import CORSMiddleware
from jwt_signature_validator.encoded_payload import (EncodedPayloadSignatureMiddleware)
from scripts.constants.app_configuration import Service
from scripts.constants.app_constants import Secrets
from scripts.core.engine.scheduler_engine import scheduler
from scripts.logging.logging import logger
from scripts.services.scheduler import scheduler_router, health_check
from scripts.utils.security_utils.decorators import CookieAuthentication
secure_access = os.environ.get("SECURE_ACCESS", default=False)
protected_hosts = os.environ.get("PROTECTED_HOSTS", "").split(",")
verify_signature = os.environ.get("VERIFY_SIGNATURE", default=False)
auth = CookieAuthentication()
app = FastAPI(
title="Scheduler Microservice",
version="1.0",
description="Scheduler App",
openapi_url=os.environ.get("SW_OPENAPI_URL", default="/openapi.json"),
docs_url=os.environ.get("SW_DOCS_URL"),
redoc_url=None
)
if os.environ.get("ENABLE_CORS") in (True, 'true', 'True') and os.environ.get("CORS_URLS"):
origins_list = os.environ.get("CORS_URLS", default="")
origins_list = origins_list.split(',') if origins_list else ["*"]
if verify_signature in (True, "true", "True"):
app.add_middleware(
EncodedPayloadSignatureMiddleware,
jwt_secret=Secrets.signature_key,
jwt_algorithms=Secrets.signature_key_alg,
protect_hosts=protected_hosts,
)
if secure_access in [True, 'true', 'True']:
app.include_router(scheduler_router, dependencies=[Depends(auth)])
else:
app.include_router(scheduler_router)
app.include_router(health_check)
service_obj = Service()
if __name__ == "__main__":
try:
logger.info("Starting the scheduler framework")
scheduler.start()
logger.info("Scheduler framework started successfully")
uvicorn.run(app, host=service_obj.host, port=service_obj.port)
except (KeyboardInterrupt, SystemExit):
scheduler.shutdown()
raise
[tool.black]
line-length = 120
[tool.isort]
profile = "black"
[tool.ruff]
select = [
"E", # pycodestyle errors
"W", # pycodestyle warnings
"F", # pyflakes
# "I", # isort
"C", # flake8-comprehensions
"B", # flake8-bugbear
]
ignore = [
"E501", # line too long, handled by black
"B008", # do not perform function calls in argument defaults
"C901", # too complex
"E402",
"B904",
"B905",
"B009"
]
[tool.ruff.per-file-ignores]
"__init__.py" = ["F401"]
APScheduler==3.7.0
fastapi==0.65.2
jwt-signature-validator~=0.0.1
pre-commit~=2.20.0
pyaml==20.4.0
pydantic==1.8.2
PyJWT==2.4.0
pymongo==3.11.3
python-dotenv~=0.17.1
pytz==2021.1
redis~=3.5.3
requests==2.25.1
uvicorn[standard]~=0.18.2
cryptography==38.0.3
orjson==3.8.1
setuptools==65.5.1
if __name__ == '__main__':
from dotenv import load_dotenv
load_dotenv()
import os
import sys
from configparser import BasicInterpolation, ConfigParser
class EnvInterpolation(BasicInterpolation):
"""
Interpolation which expands environment variables in values.
"""
def before_get(self, parser, section, option, value, defaults):
value = super().before_get(parser, section, option, value, defaults)
if not os.path.expandvars(value).startswith('$'):
return os.path.expandvars(value)
else:
return
try:
config = ConfigParser(interpolation=EnvInterpolation())
config.read(f"conf/application.conf")
except Exception as e:
print(f"Error while loading the config: {e}")
print("Failed to Load Configuration. Exiting!!!")
sys.stdout.flush()
sys.exit()
class PathToStorage:
BASE_PATH = config.get("PATH", "base_path")
if not BASE_PATH:
print("Error, environment variable BASE_PATH not set")
sys.exit(1)
MOUNT_DIR = config.get("PATH", "mount_dir")
if not MOUNT_DIR:
print("Error, environment variable MOUNT_DIR not set")
sys.exit(1)
LOGS_MODULE_PATH = f"{BASE_PATH}/logs{MOUNT_DIR}/"
class Service:
host = config.get("SERVICE", "host")
port = config.getint("SERVICE", "port")
enable_security = config.getboolean("SERVICE", "enable_security")
allow_cross_origin = config.getboolean("SERVICE", "allow_cross_origin")
secure_cookie = config.getboolean("SERVICE", "secure_cookie", fallback=True)
class Mongo:
uri = config.get("MONGO_DB", "uri")
class SchedulerConf:
scheduler_thread = config.get("SCHEDULER", "SCHEDULER_THREAD")
scheduler_thread = 120 if not scheduler_thread else int(scheduler_thread)
scheduler_process = config.get("SCHEDULER", "SCHEDULER_PROCESS")
scheduler_process = 5 if not scheduler_process else int(scheduler_process)
max_instance = config.get("SCHEDULER", "MAX_INSTANCE")
max_instance = 50 if not max_instance else int(max_instance)
misfire_grace_time_in_s = config.getint("SCHEDULER", "MISFIRE_SEC", fallback=3)
class KeyPath(object):
public = os.path.join("assets", "keys", "public")
private = os.path.join("assets", "keys", "private")
# Redis Details
redis_section = "REDIS"
redis_uri = config[redis_section]["REDIS_URI"]
redis_login_db = int(config[redis_section]["login_db"])
user_role_permissions = config[redis_section]["user_role_permissions"]
project_tags_db = config[redis_section]["project_tags_db"]
class APIEndpoints:
# scheduler APIs
scheduler_base = "/scheduler"
schedule = "/schedule"
delete_schedule = "/delete"
fetch_schedule_id = "/fetch/schedule_id"
fetch_schedules = "/fetch/all_schedules"
fetch_schedule_details = "/fetch/schedule_details"
fetch_schedule_details_table = "/fetch/schedule_details_table"
class DatabaseConstants:
db_ilens_configuration = "ilens_configuration"
collection_scheduled_metadata = "schedule_metadata"
collection_scheduled_jobs = "scheduled_jobs"
collection_user = "user"
collection_user_project = "user_project"
collection_scheduled_job_runs = "scheduled_job_runs"
class CommonKeys:
schedule_id = "schedule_id"
KEY_WEEKLY = "weekly"
KEY_MONTHLY = "monthly"
KEY_YEARLY = "yearly"
KEY_ONCE = "once"
KEY_MONTH = "month"
KEY_HOUR = "hour"
KEY_DAY = "day"
KEY_MINUTE = "minute"
KEY_DAILY = "daily"
KEY_YEAR = "year"
KEY_DAY_OF_WEEK = "day_of_week"
KEY_DAY_OF_MONTH = "day_of_month"
KEY_MONTH_OF_YEAR = "month_of_year"
KEY_CRON_DICT = "cron_dict"
KEY_WEEK = "week"
KEY_RECURSION_COUNT = "recursion_count"
KEY_TIMEZONE = "timezone"
class Secrets:
LOCK_OUT_TIME_MINS = 30
leeway_in_mins = 10
unique_key = "45c37939-0f75"
token = "8674cd1d-2578-4a62-8ab7-d3ee5f9a"
issuer = "ilens"
alg = "RS256"
signature_key = "kliLensKLiLensKL"
signature_key_alg = ["HS256"]
class APIJobConstants:
response_codes = {
"200": dict(status=True, message="Job response for #schedule_id#: #msg#"),
"210": dict(status=True, message="No response expected from a UDP protocol"),
"401": dict(
status=False,
message="Job faced an authorization issue when accessing the API '#url#': #err#",
),
"403": dict(
status=False,
message="Job faced an authentication issue when accessing the API '#url#': #err#",
),
"404": dict(
status=False,
message="Job was unable to find the URL '#url#': #err#",
),
}
validation_failed = "Request data model validation failed!"
scheduling_failure = "Faced a problem when scheduling the job"
from scripts.utils.mongo_util import MongoConnect
from scripts.constants import app_configuration
mongo_util = MongoConnect(uri=app_configuration.Mongo.uri)
from apscheduler.executors.pool import ProcessPoolExecutor, ThreadPoolExecutor
from apscheduler.jobstores.mongodb import MongoDBJobStore
from apscheduler.schedulers.background import BackgroundScheduler
from pytz import utc
from scripts.constants.app_configuration import Mongo, SchedulerConf
from scripts.constants.app_constants import DatabaseConstants
from scripts.logging.logging import logger
database_name = DatabaseConstants.db_ilens_configuration
collection = DatabaseConstants.collection_scheduled_jobs
job_store = {
"default": MongoDBJobStore(
database=database_name,
collection=collection,
host=Mongo.uri,
connect=False,
)
}
executors = {
"default": ThreadPoolExecutor(SchedulerConf.scheduler_thread),
"processpool": ProcessPoolExecutor(SchedulerConf.scheduler_process),
}
job_defaults = {"coalesce": False, "misfire_grace_time": SchedulerConf.misfire_grace_time_in_s, "max_instances": SchedulerConf.max_instance}
logger.info("Scheduler Initialising")
scheduler = BackgroundScheduler(
jobstores=job_store,
executors=executors,
job_defaults=job_defaults,
timezone=utc,
daemon=True,
)
logger.info("Scheduler Initialised")
import random
import time
from datetime import datetime, timezone
import requests
from apscheduler.triggers.cron import CronTrigger
from pytz import timezone as pytz_tz
from scripts.constants.app_constants import (
APIJobConstants,
CommonKeys,
DatabaseConstants as Db,
)
from scripts.constants.db_connection import mongo_util
from scripts.core.engine.scheduler_engine import scheduler
from scripts.database.mongo.ilens_configuration.collections.schedule_metadata import ScheduleMetadataCollection
from scripts.errors.exceptions import ILensSchedulerError, ErrorMessages
from scripts.logging.logging import logger
from scripts.schemas.scheduler_schema import JobDetails, ScheduleJobRequest
from scripts.utils.common_utils import create_login_token
schedule_metadata_collection = ScheduleMetadataCollection()
class SchedulerHandler(object):
def __init__(self):
self.response_code_mapping = APIJobConstants.response_codes
@staticmethod
async def fetch_schedule_id(request_data):
try:
return schedule_metadata_collection.get_schedule_id(
filters=request_data.filters
)
except Exception as e:
logger.exception(e)
raise
@staticmethod
def tz_diff(home_tz, away_tz):
date = datetime.now()
home = pytz_tz(home_tz)
away = pytz_tz(away_tz)
diff = (
home.localize(date) - away.localize(date).astimezone(home)
).seconds / 3600
return int(diff), int((diff - int(diff)) * 60)
@staticmethod
async def fetch_all_schedule(request_data):
try:
return schedule_metadata_collection.get_all_schedules(
filters=request_data.filters
)
except Exception as e:
logger.exception(e)
raise
@staticmethod
async def fetch_schedule_details(schedule_id):
try:
return schedule_metadata_collection.get_schedule_details(
schedule_id=schedule_id
)
except Exception as e:
logger.exception(e)
raise
@staticmethod
async def fetch_schedule_table(project_id):
try:
filters = {"project_id": project_id}
data = schedule_metadata_collection.get_all_schedules(filters=filters)
header = [
{"key": "job_type", "label": "Schedule Job Type"},
{"key": "trigger_type", "label": "Trigger Type"},
]
body = []
for each in data:
job_type = each.get("job_type")
trigger_type = each["schedule_properties"].get("trigger_type")
schedule_id = each.get("schedule_id")
body.append(
{
"trigger_type": trigger_type,
"job_type": job_type,
"schedule_id": schedule_id,
}
)
return dict(header_content=header, body_content=body)
except Exception as e:
logger.exception(e)
raise
@staticmethod
async def delete_schedule(schedule_id):
try:
schedule_metadata_collection.delete_schedule(schedule_id=schedule_id)
if scheduler.get_job(job_id=schedule_id):
scheduler.remove_job(job_id=schedule_id)
return True
except Exception as e:
logger.exception(e)
raise
def create_scheduled_job(self, input_json: ScheduleJobRequest):
try:
save_msg = "Saving scheduler metadata to db"
schedule_id = input_json.schedule_id
logger.info(save_msg)
if (
input_json.schedule_type is not None
and input_json.schedule_type == "advance"
):
logger.info(save_msg)
schedule_id = self.save_advanced_properties_to_mongo(
input_json, schedule_id
)
self.add_job_to_scheduler(input_json, schedule_id)
return {"schedule_id": schedule_id}
schedule_id = self.save_to_mongo(input_json, schedule_id=schedule_id)
scheduler_type = input_json.scheduler_type
if scheduler_type:
logger.info(save_msg)
self.add_job_to_scheduler(input_json, schedule_id)
else:
schedule_id = self.save_to_mongo(
input_json, schedule_id=schedule_id, saved_on_edge=False
)
logger.warning(
f"Scheduler with id '{schedule_id}' was not saved on edge"
)
return {"schedule_id": schedule_id}
except Exception as e:
raise ILensSchedulerError(f"{ErrorMessages.SCHEDULE_META_DATA}: {e}")
def update_job_run_meta(self, **kwargs):
try:
db_data = {}
create_run = kwargs.get("create_run", False)
executor_properties = kwargs.get("executor", dict())
input_properties = kwargs.get("input", dict())
output_properties = kwargs.get("output", dict())
run_id = kwargs.get("run_id", None)
schedule_id = kwargs.get("schedule_id", None)
status = kwargs.get("status", None)
if create_run:
run_id = self.generate_id(prefix="run_")
db_data.update(
dict(
run_id=run_id,
schedule_id=schedule_id,
input=input_properties,
output=output_properties,
executor=executor_properties,
status=status,
)
)
mongo_util.insert_one(
database_name=Db.db_ilens_configuration,
collection_name=Db.collection_scheduled_job_runs,
data=db_data,
)
else:
query = dict(run_id=run_id, schedule_id=schedule_id)
db_data.update(dict(status=status, output=output_properties))
mongo_util.update_one(
database_name=Db.db_ilens_configuration,
collection_name=Db.collection_scheduled_job_runs,
query=query,
data=db_data,
)
return run_id
except Exception as e:
raise ILensSchedulerError(f"{ErrorMessages.JOB_RUN_META_DATA}: {e}")
def api_trigger(self, **kwargs):
try:
logger.info("Executing an API based job")
request_url = kwargs.get("_api_url_", None)
request_method = kwargs.get("_api_method_", None)
request_payload = kwargs.get("_payload_", None)
schedule_id = kwargs.get("_schedule_id_", None)
project_id = kwargs.get("_project_id_", None)
user_id = kwargs.get("_user_id_", None)
try:
run_id = self.update_job_run_meta(
schedule_id=schedule_id,
create_run=True,
status="submitted",
executor=dict(type="api", url=request_url, method=request_method),
input=dict(payload=request_payload),
)
except Exception as e:
logger.critical(
f"iLens Scheduler faced a problem when generating a run id: {e}"
)
return False
try:
kwargs = dict(url=request_url)
system_run_params = dict(_schedule_id_=schedule_id, _run_id_=run_id)
kwargs.update(dict(params=system_run_params))
if request_payload is not None:
request_payload.update(system_run_params)
kwargs.update(dict(json=request_payload))
kwargs["cookies"] = {
"login-token": create_login_token(user_id=user_id, project_id=project_id), "project_id": project_id, "user_id": user_id,
"userId": user_id, "projectId": project_id
}
kwargs["headers"] = {
"login-token": create_login_token(user_id=user_id, project_id=project_id),
"projectId": project_id,
}
try:
self.update_job_run_meta(
status="running", run_id=run_id, schedule_id=schedule_id
)
res = getattr(requests, request_method)(**kwargs)
except AttributeError:
self.update_job_run_meta(
status="failed",
run_id=run_id,
schedule_id=schedule_id,
output=dict(
error=f"Unsupported method '{request_method}' was given"
),
)
logger.critical(
f"Unsupported method '{request_method}' was given. Failing the job."
)
raise ILensSchedulerError(
f"Unsupported method '{request_method}' was given. Failing the job."
)
status_code = res.status_code
res_message = res.json().get("message", None)
res_error = res.json().get("error", None)
status_code_mapping = self.response_code_mapping.get(
str(status_code), None
)
if status_code_mapping is None:
self.update_job_run_meta(
status="failed",
run_id=run_id,
schedule_id=schedule_id,
output=dict(
error=f"Job received a status code '{status_code}' from API response"
),
)
logger.error(
f"Job received a status code '{status_code}' from API response"
)
return False
else:
status = status_code_mapping.get("status", False)
message = status_code_mapping.get("message", None)
message = (
message.replace("#msg#", str(res_message))
.replace("#schedule_id#", str(schedule_id))
.replace("#url#", str(request_url))
.replace("#err#", str(res_error))
)
if status:
logger.info(message)
self.update_job_run_meta(
status="completed",
run_id=run_id,
schedule_id=schedule_id,
output=dict(message=message),
)
return True
else:
logger.warning(message)
self.update_job_run_meta(
status="failed",
run_id=run_id,
schedule_id=schedule_id,
output=dict(error=message),
)
return False
except Exception as e:
logger.error(
f"Scheduler faced an issue when running an API trigger job with run id {run_id}: {e}"
)
self.update_job_run_meta(
status="failed",
run_id=run_id,
schedule_id=schedule_id,
output=dict(error=f"Api Job failed with the following error: {e}"),
)
return False
except Exception as e:
logger.error(
"Scheduler faced an unknown issue when running a job",
exc_info=True,
)
return False
@staticmethod
def function_trigger():
logger.info("Executing a function based job")
def execute_job(self, job_details: JobDetails, schedule_id, project_id, user_id):
try:
job_type = job_details.execution_method
job_properties = job_details.execution_properties
if job_type == "api":
logger.debug("Executing job of type api")
trigger_args = []
trigger_kwargs = {}
api_url = job_properties.api_url
api_method = job_properties.api_method
payload = job_properties.payload
trigger_kwargs.update(
dict(
_payload_=payload,
_api_url_=api_url,
_api_method_=api_method,
_schedule_id_=schedule_id,
_project_id_=project_id,
_user_id_=user_id,
)
)
return self.api_trigger, trigger_args, trigger_kwargs
elif job_type == "func":
logger.debug("Executing job of type function")
raise NotImplementedError(
"Scheduler job type for 'func' is not implemented for this version of iLens"
)
else:
raise ILensSchedulerError(
f"Job type '{job_type}' is not supported by iLens scheduler currently"
)
except Exception as e:
raise ILensSchedulerError(
f"{ErrorMessages.NECESSARY_JOB_DETAILS}: {e}"
)
def add_job_to_scheduler(self, input_data: ScheduleJobRequest, schedule_id):
try:
if (
input_data.schedule_type is not None
and input_data.schedule_type == "advance"
):
cron_dict = self.get_advanced_cron_dict(
input_data.advanced_schedule_properties
)
else:
cron_dict = self.get_cron_dict(input_data)
logger.debug(f"Cron data: {cron_dict}")
crontrigger = CronTrigger(**cron_dict)
logger.info(f"Creating job for '{schedule_id}'")
job_details = input_data.job_details
project_id = input_data.project_id
user_id = input_data.user_id
trigger_obj, trigger_args, trigger_kwargs = self.execute_job(
job_details=job_details,
schedule_id=schedule_id,
project_id=project_id,
user_id=user_id,
)
logger.debug("Submitting job to scheduler framework")
existing_job = scheduler.get_job(job_id=schedule_id)
if existing_job is not None:
scheduler.remove_job(job_id=schedule_id)
if scheduler.state != 1:
logger.info(
"Scheduler went to inactive state, Activating it to add new job"
)
scheduler.start()
logger.info(f"Scheduler state: {scheduler.state}")
scheduler_obj = scheduler.add_job(
trigger_obj,
crontrigger,
id=schedule_id,
args=trigger_args,
kwargs=trigger_kwargs,
)
logger.info("Job added to scheduler")
return scheduler_obj
except Exception as e:
raise ILensSchedulerError(f"{ErrorMessages.ERROR_ADD_JOB}: {e}")
def save_advanced_properties_to_mongo(self, input_data, schedule_id=None):
try:
if schedule_id in [None, ""]:
schedule_id = self.generate_id(prefix="job_")
job_details = input_data.job_details.dict(skip_defaults=True)
job_type = input_data.job_type
scheduler_type = input_data.scheduler_type
project_id = input_data.project_id
logger.debug(f"Schedule ID: {schedule_id}")
db_data = dict(
schedule_id=schedule_id,
advanced_schedule_properties=input_data.advanced_schedule_properties,
job_details=job_details,
job_type=job_type,
scheduler_type=scheduler_type,
project_id=project_id,
)
logger.debug(f"Pushing data to db: {db_data}")
query = dict(schedule_id=schedule_id)
schedule_data = mongo_util.find_one(
database_name=Db.db_ilens_configuration,
collection_name=Db.collection_scheduled_metadata,
query=query,
)
if schedule_data is None:
mongo_util.insert_one(
database_name=Db.db_ilens_configuration,
collection_name=Db.collection_scheduled_metadata,
data=db_data,
)
else:
logger.debug("Updating an existing metadata record")
mongo_util.update_one(
database_name=Db.db_ilens_configuration,
collection_name=Db.collection_scheduled_metadata,
query=query,
data=db_data,
)
logger.debug("Data stored to db successfully")
return schedule_id
except Exception as e:
raise ILensSchedulerError(f"{ErrorMessages.SCHEDULE_META_DATA}: {e}")
def save_to_mongo(
self,
input_data: ScheduleJobRequest,
schedule_id=None,
saved_on_edge=False,
):
try:
if schedule_id in [None, ""]:
schedule_id = self.generate_id(prefix="job_")
schedule_properties = input_data.schedule_properties.dict(
skip_defaults=True
)
job_details = input_data.job_details.dict(skip_defaults=True)
job_type = input_data.job_type
scheduler_type = input_data.scheduler_type
project_id = input_data.project_id
hierarchy = input_data.hierarchy
logger.debug(f"Schedule ID: {schedule_id}")
db_data = dict(
schedule_id=schedule_id,
schedule_properties=schedule_properties,
job_details=job_details,
job_type=job_type,
scheduler_type=scheduler_type,
project_id=project_id,
hierarchy=hierarchy,
saved_on_edge=saved_on_edge,
)
logger.debug(f"Pushing data to db: {db_data}")
query = dict(schedule_id=schedule_id)
schedule_data = mongo_util.find_one(
database_name=Db.db_ilens_configuration,
collection_name=Db.collection_scheduled_metadata,
query=query,
)
if schedule_data is None:
mongo_util.insert_one(
database_name=Db.db_ilens_configuration,
collection_name=Db.collection_scheduled_metadata,
data=db_data,
)
else:
logger.debug("Updating an existing metadata record")
mongo_util.update_one(
database_name=Db.db_ilens_configuration,
collection_name=Db.collection_scheduled_metadata,
query=query,
data=db_data,
)
logger.debug("Data stored to db successfully")
return schedule_id
except Exception as e:
raise ILensSchedulerError(
f"{ErrorMessages.SCHEDULE_META_DATA}: {e}"
)
@staticmethod
def generate_id(prefix):
_id = prefix
timestamp = time.time()
rand = random.randint(1000, 9999)
_id += str(int(timestamp)) + "_" + str(rand)
return _id
@staticmethod
def days_hours_minutes(td):
return td.days, td.seconds // 3600, (td.seconds // 60) % 60
@staticmethod
def system_timezone():
return datetime.now(timezone.utc).astimezone().tzname()
@staticmethod
def get_advanced_cron_dict(scheduling_details):
schedule_frequency = scheduling_details.get("schedule_frequency")
cron_job_dict = {}
recursion_count = scheduling_details.get(CommonKeys.KEY_RECURSION_COUNT)
from_ts = scheduling_details.get("from_date")
start_date = datetime.fromtimestamp(from_ts)
cron_job_dict["start_date"] = start_date
if "to_date" in scheduling_details:
end_ts = scheduling_details.get("to_date")
end_date = datetime.fromtimestamp(end_ts)
cron_job_dict["end_date"] = end_date
logger.info(f"System timezone is {SchedulerHandler.system_timezone()}")
if "hours" in scheduling_details and "minutes" in scheduling_details:
if SchedulerHandler.system_timezone() == "UTC":
home_time = "{hr}:{min}".format(
hr=scheduling_details.get("hours"),
min=scheduling_details.get("minutes"),
)
diff_hours, diff_minutes = SchedulerHandler.tz_diff(
"UTC", scheduling_details.get(CommonKeys.KEY_TIMEZONE)
)
utc_time = "{hr}:{min}".format(hr=diff_hours, min=diff_minutes)
format_ = "%H:%M"
time = datetime.strptime(home_time, format_) - datetime.strptime(
utc_time, format_
)
days, hours, minutes = SchedulerHandler.days_hours_minutes(time)
else:
hours = scheduling_details.get("hours")
minutes = scheduling_details.get("minutes")
else:
hours, minutes = SchedulerHandler.tz_diff(
"UTC", scheduling_details.get(CommonKeys.KEY_TIMEZONE)
)
if schedule_frequency.lower() == CommonKeys.KEY_WEEKLY:
cron_job_dict[CommonKeys.KEY_MINUTE] = str(minutes)
cron_job_dict[CommonKeys.KEY_HOUR] = str(hours)
cron_job_dict[CommonKeys.KEY_DAY] = "*"
cron_job_dict[CommonKeys.KEY_MONTH] = "*"
cron_job_dict[CommonKeys.KEY_DAY_OF_WEEK] = scheduling_details.get(
CommonKeys.KEY_DAY_OF_WEEK, datetime.today().weekday()
)
cron_job_dict[CommonKeys.KEY_WEEK] = "*/" + str(recursion_count)
elif schedule_frequency.lower() == CommonKeys.KEY_MONTHLY:
cron_job_dict[CommonKeys.KEY_MINUTE] = str(minutes)
cron_job_dict[CommonKeys.KEY_HOUR] = str(hours)
cron_job_dict[CommonKeys.KEY_DAY] = str(
scheduling_details.get(
CommonKeys.KEY_DAY_OF_MONTH, datetime.today().day
)
)
cron_job_dict[CommonKeys.KEY_MONTH] = "*/" + str(recursion_count)
cron_job_dict[CommonKeys.KEY_DAY_OF_WEEK] = "*"
elif schedule_frequency.lower() == CommonKeys.KEY_YEARLY:
cron_job_dict[CommonKeys.KEY_YEAR] = "*/" + str(recursion_count)
cron_job_dict[CommonKeys.KEY_MINUTE] = str(minutes)
cron_job_dict[CommonKeys.KEY_HOUR] = str(hours)
cron_job_dict[CommonKeys.KEY_DAY] = str(
scheduling_details.get(
CommonKeys.KEY_DAY_OF_MONTH, datetime.today().day
)
)
cron_job_dict[CommonKeys.KEY_MONTH] = str(
scheduling_details.get(
CommonKeys.KEY_MONTH_OF_YEAR, datetime.today().month
)
)
cron_job_dict[CommonKeys.KEY_DAY_OF_WEEK] = "*"
elif schedule_frequency.lower() == CommonKeys.KEY_DAILY:
cron_job_dict[CommonKeys.KEY_YEAR] = "*"
cron_job_dict[CommonKeys.KEY_MINUTE] = str(minutes)
cron_job_dict[CommonKeys.KEY_HOUR] = str(hours)
cron_job_dict[CommonKeys.KEY_DAY] = "*/" + str(recursion_count)
cron_job_dict[CommonKeys.KEY_MONTH] = "*"
cron_job_dict[CommonKeys.KEY_DAY_OF_WEEK] = "*"
logger.info("cron_job_dict", cron_job_dict)
return cron_job_dict
@staticmethod
def get_cron_dict(input_data: ScheduleJobRequest):
cron_job_dict = {}
rule_engine_data = input_data.schedule_properties
trigger_type = rule_engine_data.trigger_type
trigger_interval = rule_engine_data.trigger_interval
trigger_datetime = rule_engine_data.interval_properties.trigger_date_time
from_date = rule_engine_data.interval_properties.from_date
to_date = rule_engine_data.interval_properties.to_date
configure_daily_time_range = (
rule_engine_data.interval_properties.configure_daily_time_range
)
daily_start_time = rule_engine_data.interval_properties.daily_start_time
daily_end_time = rule_engine_data.interval_properties.daily_end_time
run_on_day = rule_engine_data.interval_properties.run_on_day
run_on_occurrence = rule_engine_data.interval_properties.run_on_occurrence
trigger_time = rule_engine_data.interval_properties.trigger_time
daily_selected_interval = rule_engine_data.interval_properties.selected_interval
minute = rule_engine_data.interval_properties.minute
hour = rule_engine_data.interval_properties.hour
selected_week_days = rule_engine_data.interval_properties.selected_week_days
selected_months = rule_engine_data.interval_properties.selected_months
if trigger_type == "recurring":
if configure_daily_time_range:
daily_start_date = datetime.fromtimestamp(daily_start_time / 1000)
cron_job_dict["start_date"] = daily_start_date
schedule_hour = datetime.fromtimestamp(daily_start_time / 1000).hour
schedule_minute = datetime.fromtimestamp(daily_start_time / 1000).minute
cron_job_dict["hour"] = schedule_hour
cron_job_dict["minute"] = schedule_minute
daily_end_date = datetime.fromtimestamp(daily_end_time / 1000)
cron_job_dict["end_date"] = daily_end_date
if from_date and to_date:
cron_job_dict["start_date"] = datetime.fromtimestamp(from_date / 1000)
cron_job_dict["end_date"] = datetime.fromtimestamp(to_date / 1000)
if run_on_occurrence is not None and run_on_day is not None:
cron_job_dict["day"] = f"{run_on_occurrence} {run_on_day}"
schedule_hour = 00
schedule_minute = 00
if trigger_time:
schedule_hour = str(datetime.fromtimestamp(trigger_time / 1000).hour)
schedule_minute = str(
datetime.fromtimestamp(trigger_time / 1000).minute
)
if trigger_interval == "every-n-minutes":
count = int(minute)
minutes_list = []
i = 00
while i < 59:
minutes_list.append(str(i))
i += count
cron_job_dict["minute"] = ",".join(minutes_list)
elif trigger_interval == "every-n-hourly":
count = int(hour)
hour_list = []
i = 00
while i < 24:
hour_list.append(str(i))
i += count
cron_job_dict["hour"] = ",".join(hour_list)
cron_job_dict["minute"] = "00"
elif trigger_interval in ["daily", "weekly"]:
if schedule_hour and schedule_minute:
cron_job_dict["hour"] = schedule_hour
cron_job_dict["minute"] = schedule_minute
else:
cron_job_dict["hour"] = "00"
cron_job_dict["minute"] = "00"
if trigger_interval == "daily" and daily_selected_interval:
if daily_selected_interval == "every_day":
cron_job_dict["day_of_week"] = "*"
elif daily_selected_interval == "week_days":
cron_job_dict["day_of_week"] = "mon-fri"
elif daily_selected_interval == "weekend":
cron_job_dict["day_of_week"] = "sat-sun"
elif trigger_interval == "weekly" and selected_week_days:
cron_job_dict["day_of_week"] = ",".join(selected_week_days)
elif trigger_interval == "monthly":
date = datetime.fromtimestamp(from_date / 1000)
# logger.trace('-----| Cron Job Dictionary before setting Monthly Meta rules: {}'.
# format(cron_job_dict))
if selected_months:
cron_job_dict["month"] = ",".join(list(selected_months))
else:
cron_job_dict["month"] = "*"
if not cron_job_dict["day"]:
cron_job_dict["day"] = str(date.day)
if schedule_hour and schedule_minute:
cron_job_dict["hour"] = schedule_hour
cron_job_dict["minute"] = schedule_minute
else:
cron_job_dict["hour"] = "00"
cron_job_dict["minute"] = "00"
elif trigger_type == "onetime" and trigger_datetime:
start_date = datetime.fromtimestamp(trigger_datetime / 1000)
cron_job_dict["year"] = str(start_date.year)
cron_job_dict["month"] = str(start_date.month)
cron_job_dict["day"] = str(start_date.day)
cron_job_dict["hour"] = str(start_date.hour)
cron_job_dict["minute"] = str(start_date.minute)
logger.debug(f"cron_dict: {cron_job_dict}")
return cron_job_dict
from scripts.constants.app_constants import DatabaseConstants, CommonKeys
from scripts.constants.db_connection import mongo_util
from scripts.logging.logging import logger
from scripts.utils.mongo_util import MongoCollectionClass
class ScheduleMetadataCollection(MongoCollectionClass):
def __init__(self):
self.database = DatabaseConstants.db_ilens_configuration
self.collection = DatabaseConstants.collection_scheduled_metadata
@property
def key_schedule_id(self):
return CommonKeys.schedule_id
def get_schedule_id(self, filters):
try:
data = mongo_util.find_one(
database_name=self.database,
collection_name=self.collection,
query=filters,
filter_dict={
self.key_schedule_id: 1,
self.key_mongo_default_id: 0,
},
)
return data
except Exception as e:
logger.exception(e)
raise
def get_all_schedules(self, filters):
try:
data = mongo_util.find(
database_name=self.database,
collection_name=self.collection,
query=filters,
)
return data
except Exception as e:
logger.exception(e)
raise
def get_schedule_details(self, schedule_id):
try:
query = {self.key_schedule_id: schedule_id}
data = mongo_util.find_one(
database_name=self.database,
collection_name=self.collection,
query=query,
)
return data
except Exception as e:
logger.exception(e)
raise
def delete_schedule(self, schedule_id):
try:
query = {self.key_schedule_id: schedule_id}
data = mongo_util.delete_one(
database_name=self.database,
collection_name=self.collection,
query=query,
)
return data
except Exception as e:
logger.exception(e)
raise
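# Illustrative sketch (not part of the original file): a minimal example of calling
# this collection wrapper. The schedule_id value is a placeholder and a reachable
# MongoDB instance is assumed.
if __name__ == "__main__":
    metadata_collection = ScheduleMetadataCollection()
    record = metadata_collection.get_schedule_details(schedule_id="schedule_001")
    print(record)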
from scripts.constants.app_constants import DatabaseConstants, CommonKeys
from scripts.constants.db_connection import mongo_util
from scripts.logging.logging import logger
from scripts.utils.mongo_util import MongoCollectionClass
class User(MongoCollectionClass):
def __init__(self):
self.database = DatabaseConstants.db_ilens_configuration
self.collection = DatabaseConstants.collection_user
def find_user_role_for_user_id(self, user_id, project_id):
query = {"user_id": user_id, "project_id": project_id}
filter_dict = {"userrole": 1, "_id": 0}
return mongo_util.find_one(database_name=self.database, collection_name=self.collection, query=query, filter_dict=filter_dict)
from scripts.constants.app_constants import DatabaseConstants, CommonKeys
from scripts.constants.db_connection import mongo_util
from scripts.logging.logging import logger
from scripts.utils.mongo_util import MongoCollectionClass
class UserProject(MongoCollectionClass):
def __init__(self):
self.database = DatabaseConstants.db_ilens_configuration
self.collection = DatabaseConstants.collection_user_project
def find_user_role_for_user_id(self, user_id, project_id):
query = {"user_id": user_id, "project_id": project_id}
filter_dict = {"userrole": 1, "_id": 0}
return mongo_util.find_one(database_name=self.database, collection_name=self.collection, query=query, filter_dict=filter_dict)
import redis
from scripts.constants.app_configuration import redis_uri, redis_login_db, user_role_permissions, project_tags_db
login_db = redis.from_url(redis_uri, db=int(redis_login_db), decode_responses=True)
# Cast the db indexes to int, matching redis_login_db above, in case the
# configuration values arrive as strings.
user_role_permissions_redis = redis.from_url(
redis_uri, db=int(user_role_permissions), decode_responses=True
)
project_details_db = redis.from_url(redis_uri, db=int(project_tags_db), decode_responses=True)
"""
Base Error Class
"""
class ILensSchedulerError(Exception):
"""Generic ILensSchedulerError"""
def __init__(self, msg):
super().__init__(msg)
class ErrorMessages:
JOB_RUN_META_DATA = "Scheduler faced a problem when storing the job run metadata"
SCHEDULE_META_DATA = "Scheduler faced a problem when storing the schedule metadata"
ERROR_ADD_JOB = "Scheduler faced a problem when adding a job"
NECESSARY_JOB_DETAILS = "Scheduler faced a problem when creating necessary job details"
class ILensErrors(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
class AuthenticationError(ILensErrors):
"""
JWT Authentication Error
"""
class ErrorMessages:
ERROR001 = "Authentication Failed. Please verify token"
ERROR002 = "Signature Expired"
ERROR003 = "Signature Not Valid"
# All errors in the MONGO... series are exception codes for errors related to MongoDB
MONGO001 = "Error Code MONGO001: Server was unable to establish connection with MongoDB"
MONGO002 = "Error Code MONGO002: Server faced a problem when inserting document(s) into MongoDB"
MONGO003 = "Error Code MONGO003: Server faced a problem to find the document(s) with the given condition"
MONGO004 = "Error Code MONGO004: Server faced a problem to delete the document(s) with the given condition"
MONGO005 = "Error Code MONGO005: Server faced a problem to update the document(s) with the given condition and data"
MONGO006 = "Error Code MONGO006: Server faced a problem when aggregating the data"
MONGO007 = "Error Code MONGO007: Server faced a problem when closing MongoDB connection"
MONGO008 = "Error Code MONGO008: Found an existing record with the same ID in MongoDB"
MONGO009 = "Error Code MONGO009: Server faced a problem when fetching distinct documents from MongoDB"
MONGO010 = "Error Code MONGO010: Server faced a problem when performing a search and replace in MongoDB"
MONGO011 = "Error Code MONGO011: Server faced a problem when de-serializing MongoDB object"
class ILensException(Exception):
pass
class MongoException(ILensException):
pass
class MongoConnectionException(MongoException):
pass
class MongoQueryException(MongoException):
pass
class MongoEncryptionException(MongoException):
pass
class MongoRecordInsertionException(MongoQueryException):
pass
class MongoFindException(MongoQueryException):
pass
class MongoDeleteException(MongoQueryException):
pass
class MongoUpdateException(MongoQueryException):
pass
class MongoUnknownDatatypeException(MongoEncryptionException):
pass
class MongoDistictQueryException(MongoException):
pass
class MongoFindAndReplaceException(MongoException):
pass
class MongoObjectDeserializationException(MongoException):
pass
logger:
name: ilens
level: DEBUG
handlers:
- type: RotatingFileHandler
max_bytes: 100000000
back_up_count: 5
- type: SocketHandler
host: localhost
port: 23582
- type: StreamHandler
name: ilens_visualizer
import logging
import os
from logging import StreamHandler
from logging.handlers import RotatingFileHandler, SocketHandler
import yaml
from scripts.constants.app_configuration import PathToStorage
# Reads the logging configuration from the given YAML file (logger_conf.yml)
def read_configuration(file_name):
"""
:param file_name:
:return: all the configuration constants
"""
with open(file_name, 'r') as stream:
try:
return yaml.safe_load(stream)
except Exception as e:
print(f"Failed to load Configuration. Error: {e}")
config = read_configuration("scripts/logging/logger_conf.yml")
logging_config = config["logger"]
def get_logger():
"""
Creates a rotating log
"""
__logger__ = logging.getLogger('')
__logger__.setLevel(logging_config["level"].upper())
log_formatter = '%(asctime)s - %(levelname)-6s - [%(threadName)5s:%(funcName)5s():%(lineno)s] - %(message)s'
time_format = "%Y-%m-%d %H:%M:%S"
file_path = PathToStorage.LOGS_MODULE_PATH
formatter = logging.Formatter(log_formatter, time_format)
for each_handler in logging_config["handlers"]:
if each_handler["type"] in ["RotatingFileHandler"]:
if not os.path.exists(file_path):
os.makedirs(file_path)
log_file = os.path.join(file_path, f"{logging_config['name']}.log")
temp_handler = RotatingFileHandler(log_file,
maxBytes=each_handler["max_bytes"],
backupCount=each_handler["back_up_count"])
temp_handler.setFormatter(formatter)
elif each_handler["type"] in ["SocketHandler"]:
temp_handler = SocketHandler(each_handler["host"], each_handler["port"])
elif each_handler["type"] in ["StreamHandler"]:
temp_handler = StreamHandler()
temp_handler.setFormatter(formatter)
else:
continue  # skip unknown handler types instead of registering a None handler
__logger__.addHandler(temp_handler)
return __logger__
logger = get_logger()
from typing import Optional, Any
from pydantic import BaseModel
class DefaultResponse(BaseModel):
status: str = "Failed"
message: Optional[str]
data: Optional[Any]
class DefaultFailureResponse(DefaultResponse):
error: Any
import time
from typing import Dict, Optional, List, Any, Union
from pydantic import BaseModel, Field
class IntervalSettings(BaseModel):
# One time
trigger_date_time: int = Field(default_factory=lambda: int(time.time() * 1000))  # evaluated per instance, not at import time
# Recurring
# # Every n minutes
from_date: int = Field(default_factory=lambda: int(time.time() * 1000))
to_date: int = Field(default_factory=lambda: int(time.time() * 1000))
daily_start_time: Optional[int]
daily_end_time: Optional[int]
configure_daily_time_range: Optional[bool] = False
minute: Optional[int]
# # Every n hour
hour: Optional[int]
# # Daily
selected_interval: Optional[str]
trigger_time: Optional[int]
# # Weekly
selected_week_days: Optional[List]
# # Monthly
selected_months: Optional[List]
date_enabled: Optional[bool] = True
days_enabled: Optional[bool] = False
selected_date: Optional[str]
run_on_day: Optional[str]
run_on_occurrence: Optional[str]
class APIProperties(BaseModel):
api_url: str
api_method: str
payload: Optional[Dict]
class FunctionProperties(BaseModel):
class_name: str
func_name: str
args: Optional[Any]
kwargs: Optional[Dict]
class JobDetails(BaseModel):
execution_method: str
execution_properties: Optional[Union[APIProperties, FunctionProperties]]
class ScheduleProperties(BaseModel):
trigger_type: str = "onetime"
trigger_interval: str
interval_properties: IntervalSettings
class ScheduleMetadata(BaseModel):
created_on: int = Field(default_factory=lambda: int(time.time() * 1000))  # evaluated per instance, not at import time
created_by: str
last_updated: int = Field(default_factory=lambda: int(time.time() * 1000))
last_updated_by: str
desc: str
class ScheduleJobRequest(BaseModel):
user_id: Optional[str] = "system"
schedule_properties: Optional[ScheduleProperties]
job_details: Optional[JobDetails]
job_type: str
scheduler_type: str = "server"
project_id: str
hierarchy: Optional[str]
schedule_id: Optional[str] = None
advanced_schedule_properties: Optional[Dict]
schedule_type: Optional[str]
class SchedulesFetchRequest(BaseModel):
filters: Optional[Dict]
class ScheduledIDFetchRequest(SchedulesFetchRequest):
pass
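# Illustrative sketch (not part of the original file): one way a ScheduleJobRequest
# payload could be assembled from the models above. The trigger values, URL and
# identifiers are placeholders, not values mandated by this service.
if __name__ == "__main__":
    demo_request = ScheduleJobRequest(
        user_id="user_099",
        schedule_properties=ScheduleProperties(
            trigger_type="recurring",
            trigger_interval="daily",
            interval_properties=IntervalSettings(selected_interval="week_days"),
        ),
        job_details=JobDetails(
            execution_method="api",
            execution_properties=APIProperties(api_url="https://example.com/run", api_method="POST"),
        ),
        job_type="report",
        project_id="project_100",
    )
    print(demo_request.json())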
from fastapi import APIRouter
from pydantic import ValidationError
from scripts.constants.app_constants import APIEndpoints, APIJobConstants
from scripts.core.handlers.scheduler_handler import SchedulerHandler
from scripts.logging.logging import logger
from scripts.schemas.response_models import DefaultFailureResponse, DefaultResponse
from scripts.schemas.scheduler_schema import (
ScheduledIDFetchRequest,
ScheduleJobRequest,
SchedulesFetchRequest,
)
scheduler_handler = SchedulerHandler()
scheduler_router = APIRouter(prefix=APIEndpoints.scheduler_base)
health_check = APIRouter()
@health_check.get("/api/iLens-schedular/healthcheck")
async def ping():
return {"status": 200}
@scheduler_router.post(APIEndpoints.schedule, tags=["scheduler"])
async def schedule(request_data: ScheduleJobRequest):
try:
response_json = scheduler_handler.create_scheduled_job(request_data)
return DefaultResponse(status="success", data=response_json)
except ValidationError as e:
logger.error(f"Request data model validation failed: {e.json()}")
return DefaultFailureResponse(
status="failed",
message=APIJobConstants.validation_failed,
error=e.json(),
)
except Exception as e:
logger.error(f"Faced a problem when scheduling the job: {e}")
return DefaultFailureResponse(
status="failed",
message=APIJobConstants.scheduling_failure,
error=e,
)
@scheduler_router.post(APIEndpoints.fetch_schedule_id, tags=["scheduler"])
async def fetch_schedule_id(request_data: ScheduledIDFetchRequest):
try:
response_json = await scheduler_handler.fetch_schedule_id(request_data)
return DefaultResponse(status="success", data=response_json)
except ValidationError as e:
logger.error(f"Request data model validation failed: {e.json()}")
return DefaultFailureResponse(
status="failed",
message=APIJobConstants.validation_failed,
error=e.json(),
)
except Exception as e:
logger.error(f"Faced a problem when scheduling the job: {e}")
return DefaultFailureResponse(
status="failed",
message=APIJobConstants.scheduling_failure,
error=e,
)
@scheduler_router.post(APIEndpoints.fetch_schedules, tags=["scheduler"])
async def fetch_schedules(request_data: SchedulesFetchRequest):
try:
response_json = await scheduler_handler.fetch_all_schedule(request_data)
return DefaultResponse(status="success", data=response_json)
except ValidationError as e:
logger.error(f"Request data model validation failed: {e.json()}")
return DefaultFailureResponse(
status="failed",
message=APIJobConstants.validation_failed,
error=e.json(),
)
except Exception as e:
logger.error(f"Faced a problem when scheduling the job: {e}")
return DefaultFailureResponse(
status="failed",
message=APIJobConstants.scheduling_failure,
error=e,
)
@scheduler_router.get(APIEndpoints.fetch_schedule_details, tags=["scheduler"])
async def fetch_schedule_details(schedule_id: str):
try:
response_json = await scheduler_handler.fetch_schedule_details(
schedule_id=schedule_id
)
return DefaultResponse(status="success", data=response_json)
except ValidationError as e:
logger.error(f"Request data model validation failed: {e.json()}")
return DefaultFailureResponse(
status="failed",
message=APIJobConstants.validation_failed,
error=e.json(),
)
except Exception as e:
logger.error(f"Faced a problem when scheduling the job: {e}")
return DefaultFailureResponse(
status="failed",
message=APIJobConstants.scheduling_failure,
error=e,
)
@scheduler_router.get(APIEndpoints.fetch_schedule_details_table, tags=["scheduler"])
async def fetch_schedule_table(project_id: str):
try:
response_json = await scheduler_handler.fetch_schedule_table(
project_id=project_id
)
return DefaultResponse(
status="success",
data=response_json,
message="Data Fetched Successfully",
)
except ValidationError as e:
logger.error(f"Request data model validation failed: {e.json()}")
return DefaultFailureResponse(
status="failed",
message=APIJobConstants.validation_failed,
error=e.json(),
)
except Exception as e:
logger.error(f"Faced a problem when scheduling the job: {e}")
return DefaultFailureResponse(
status="failed",
message=APIJobConstants.scheduling_failure,
error=e,
)
@scheduler_router.get(APIEndpoints.delete_schedule, tags=["scheduler"])
async def delete_schedule(schedule_id: str):
try:
response_json = await scheduler_handler.delete_schedule(schedule_id=schedule_id)
return DefaultResponse(
status="success",
data=response_json,
message="Job Deleted Successfully",
)
except ValidationError as e:
logger.error(f"Request data model validation failed: {e.json()}")
return DefaultFailureResponse(
status="failed",
message=APIJobConstants.validation_failed,
error=e.json(),
)
except Exception as e:
logger.error(f"Faced a problem when scheduling the job: {e}")
return DefaultFailureResponse(
status="failed",
message=APIJobConstants.scheduling_failure,
error=e,
)
from scripts.constants.app_constants import Secrets
from scripts.logging.logging import logger
from scripts.utils.security_utils.apply_encryption_utility import create_token
def create_login_token(host: str = '127.0.0.1', user_id=None, internal_token=Secrets.token, project_id=None):
"""
This method is to create a cookie
"""
try:
if user_id is None or user_id == "system":
user_id = "user_099"
return create_token(
user_id=user_id,
ip=host,
token=internal_token,
project_id=project_id
)
except Exception as e:
logger.exception(str(e))
raise
"""
Mongo Utility
Author: Irfanuddin Shafi Ahmed
Reference: Pymongo Documentation
"""
import sys
from typing import Dict, List, Optional
from pymongo import MongoClient
class MongoCollectionClass:
@property
def key_mongo_default_id(self):
return "_id"
class MongoException(Exception):
pass
class MongoConnect:
def __init__(self, uri):
try:
self.client = MongoClient(uri, connect=False)
except Exception as e:
print(e)
sys.exit(1)
def insert_one(self, database_name: str, collection_name: str, data: Dict):
"""
The function is used to insert a document into a collection in a Mongo Database.
:param database_name: Database Name
:param collection_name: Collection Name
:param data: Data to be inserted
:return: Insert ID
"""
try:
db = self.client[database_name]
collection = db[collection_name]
response = collection.insert_one(data)
return response.inserted_id
except Exception as e:
raise MongoException(e)
def insert_many(
self, database_name: str, collection_name: str, data: List
):
"""
The function is used to insert documents into a collection in a Mongo Database.
:param database_name: Database Name
:param collection_name: Collection Name
:param data: List of Data to be inserted
:return: Insert IDs
"""
try:
db = self.client[database_name]
collection = db[collection_name]
response = collection.insert_many(data)
return response.inserted_ids
except Exception as e:
raise MongoException(e)
def find(
self,
database_name: str,
collection_name: str,
query: Dict,
filter_dict: Optional[Dict] = None,
sort=None,
skip: Optional[int] = 0,
limit: Optional[int] = None,
):
"""
The function is used to query documents from a given collection in a Mongo Database
:param database_name: Database Name
:param collection_name: Collection Name
:param query: Query Dictionary
:param filter_dict: Filter Dictionary
:param sort: List of tuple with key and direction. [(key, -1), ...]
:param skip: Skip Number
:param limit: Limit Number
:return: List of Documents
"""
if sort is None:
sort = list()
if filter_dict is None:
filter_dict = {"_id": 0}
try:
db = self.client[database_name]
collection = db[collection_name]
if len(sort) > 0:
cursor = (
collection.find(query, filter_dict).sort(sort).skip(skip)
)
else:
cursor = collection.find(query, filter_dict).skip(skip)
if limit:
cursor = cursor.limit(limit)
response = list(cursor)
cursor.close()
return response
except Exception as e:
raise MongoException(e)
def find_one(
self,
database_name: str,
collection_name: str,
query: Dict,
filter_dict: Optional[Dict] = None,
):
try:
if filter_dict is None:
filter_dict = {"_id": 0}
db = self.client[database_name]
collection = db[collection_name]
response = collection.find_one(query, filter_dict)
return response
except Exception as e:
raise MongoException(e)
def update_one(
self,
database_name: str,
collection_name: str,
query: Dict,
data: Dict,
upsert: bool = False,
):
"""
:param upsert:
:param database_name:
:param collection_name:
:param query:
:param data:
:return:
"""
try:
db = self.client[database_name]
collection = db[collection_name]
response = collection.update_one(
query, {"$set": data}, upsert=upsert
)
return response.modified_count
except Exception as e:
raise MongoException(e)
def delete_many(
self, database_name: str, collection_name: str, query: Dict
):
"""
:param database_name:
:param collection_name:
:param query:
:return:
"""
try:
db = self.client[database_name]
collection = db[collection_name]
response = collection.delete_many(query)
return response.deleted_count
except Exception as e:
raise MongoException(e)
def delete_one(
self, database_name: str, collection_name: str, query: Dict
):
"""
:param database_name:
:param collection_name:
:param query:
:return:
"""
try:
db = self.client[database_name]
collection = db[collection_name]
response = collection.delete_one(query)
return response.deleted_count
except Exception as e:
raise MongoException(e)
def distinct(
self,
database_name: str,
collection_name: str,
query_key: str,
filter_json: Optional[Dict] = None,
):
"""
:param database_name:
:param collection_name:
:param query_key:
:param filter_json:
:return:
"""
try:
db = self.client[database_name]
collection = db[collection_name]
response = collection.distinct(query_key, filter_json)
return response
except Exception as e:
raise MongoException(e)
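# Illustrative sketch (not part of the original file): basic usage of the utility
# above against a local MongoDB. The URI, database, collection and document values
# are placeholders.
if __name__ == "__main__":
    mongo = MongoConnect("mongodb://localhost:27017/")
    inserted_id = mongo.insert_one("demo_db", "demo_collection", {"schedule_id": "schedule_001", "status": "active"})
    documents = mongo.find("demo_db", "demo_collection", query={"status": "active"}, limit=10)
    print(inserted_id, documents)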
import uuid
from datetime import timedelta, datetime
from scripts.constants.app_constants import Secrets
from scripts.database.redis_connections import login_db
from scripts.utils.security_utils.jwt_util import JWT
jwt = JWT()
def create_token(user_id, ip, token, age=Secrets.LOCK_OUT_TIME_MINS, login_token=None, project_id=None):
"""
This method is to create a cookie
"""
try:
uid = login_token
if not uid:
uid = str(uuid.uuid4()).replace("-", "")
payload = {
"ip": ip,
"user_id": user_id,
"token": token,
"uid": uid,
"age": age
}
if project_id:
payload["project_id"] = project_id
exp = datetime.utcnow() + timedelta(minutes=age)
_extras = {"iss": Secrets.issuer, "exp": exp}
_payload = {**payload, **_extras}
new_token = jwt.encode(_payload)
# Add session to redis
login_db.set(uid, new_token)
login_db.expire(uid, timedelta(minutes=age))
return uid
except Exception:
raise
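# Illustrative sketch (not part of the original file): issuing a session token for
# a user. It assumes the Redis connection and JWT key files configured elsewhere in
# this repo are available; the user and project IDs are placeholders.
if __name__ == "__main__":
    session_id = create_token(user_id="user_099", ip="127.0.0.1", token=Secrets.token, project_id="project_100")
    print(f"login-token cookie value: {session_id}")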
from secrets import compare_digest
from typing import Optional
from fastapi import HTTPException, Request, Response, status
from fastapi.openapi.models import APIKey, APIKeyIn
from fastapi.security import APIKeyCookie
from fastapi.security.api_key import APIKeyBase
from pydantic import BaseModel, Field
from scripts.constants.app_configuration import Service
from scripts.constants.app_constants import Secrets
from scripts.database.redis_connections import login_db
from scripts.logging.logging import logger
from scripts.utils.security_utils.apply_encryption_utility import create_token
from scripts.utils.security_utils.jwt_util import JWT
class CookieAuthentication(APIKeyBase):
"""
Authentication backend using a cookie.
Internally, uses a JWT token to store the data.
"""
scheme: APIKeyCookie
cookie_name: str
cookie_secure: bool
def __init__(
self,
cookie_name: str = "login-token",
):
super().__init__()
self.model: APIKey = APIKey(**{"in": APIKeyIn.cookie}, name=cookie_name)
self.scheme_name = self.__class__.__name__
self.cookie_name = cookie_name
self.scheme = APIKeyCookie(name=self.cookie_name, auto_error=False)
self.login_redis = login_db
self.jwt = JWT()
async def __call__(self, request: Request, response: Response) -> str:
cookies = request.cookies
login_token = cookies.get("login-token")
if not login_token:
login_token = request.headers.get("login-token")
if not login_token:
raise HTTPException(status_code=401)
jwt_token = self.login_redis.get(login_token)
# logger.debug(f'jwt token: {jwt_token}')
if not jwt_token:
raise HTTPException(status_code=401)
try:
decoded_token = self.jwt.validate(token=jwt_token)
if not decoded_token:
raise HTTPException(status_code=401)
except Exception as e:
logger.debug(f'Exception in decoded token: {str(e)}')
raise HTTPException(status_code=401, detail=e.args)
user_id = decoded_token.get("user_id")
project_id = decoded_token.get("project_id")
cookie_user_id = request.cookies.get(
"user_id", request.cookies.get(
"userId", request.headers.get("userId")
))
_token = decoded_token.get("token")
_age = int(decoded_token.get("age", Secrets.LOCK_OUT_TIME_MINS))
if not compare_digest(Secrets.token, _token):
raise HTTPException(status_code=401)
if login_token != decoded_token.get("uid"):
raise HTTPException(status_code=401)
if cookie_user_id and not compare_digest(user_id, cookie_user_id):
raise HTTPException(status_code=401)
try:
new_token = create_token(
user_id=user_id,
ip=request.client.host,
token=Secrets.token,
age=_age,
login_token=login_token,
project_id=project_id
)
except Exception as e:
logger.debug(f'Exception in create token: {str(e)}')
raise HTTPException(status_code=401, detail=e.args)
response.set_cookie(
'login-token',
new_token,
samesite='strict',
httponly=True,
secure=Service.secure_cookie,
max_age=Secrets.LOCK_OUT_TIME_MINS * 60,
)
response.headers['login-token'] = new_token
# If the project ID is None, setting these headers can raise a 500; ensure the
# login token is created with a project ID.
response.headers.update({"login-token": new_token,
"projectId": project_id,
"project_id": project_id,
"userId": user_id,
"user_id": user_id})
return user_id
class MetaInfoSchema(BaseModel):
projectId: Optional[str] = ""
project_id: Optional[str] = ""
user_id: Optional[str] = ""
language: Optional[str] = ""
ip_address: Optional[str] = ""
login_token: Optional[str] = Field(alias="login-token")
class Config:
allow_population_by_field_name = True
class MetaInfoCookie(APIKeyBase):
"""
Project ID backend using a cookie.
"""
scheme: APIKeyCookie
def __init__(self):
super().__init__()
self.model: APIKey = APIKey(**{"in": APIKeyIn.cookie}, name="meta")
self.scheme_name = self.__class__.__name__
def __call__(self, request: Request, response: Response):
cookies = request.cookies
cookie_json = {
"projectId": cookies.get("projectId", request.headers.get("projectId")),
"userId": cookies.get("user_id", cookies.get("userId", request.headers.get("userId"))),
"language": cookies.get("language", request.headers.get("language")),
}
return MetaInfoSchema(
project_id=cookie_json["projectId"],
user_id=cookie_json["userId"],
projectId=cookie_json["projectId"],
language=cookie_json["language"],
ip_address=request.client.host,
login_token=cookies.get("login-token"),
)
class GetUserID(APIKeyBase):
"""
Project ID backend using a cookie.
"""
scheme: APIKeyCookie
def __init__(self):
super().__init__()
self.model: APIKey = APIKey(**{"in": APIKeyIn.cookie}, name="user_id")
self.scheme_name = self.__class__.__name__
def __call__(self, request: Request, response: Response):
if user_id := request.cookies.get("user_id", request.cookies.get("userId", request.headers.get("userId"))):
return user_id
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
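# Illustrative sketch (not part of the original file): wiring the security
# dependencies above into a FastAPI route. The app and path are placeholders;
# the real routers are defined elsewhere in this repo.
if __name__ == "__main__":
    from fastapi import Depends, FastAPI

    demo_app = FastAPI()
    cookie_auth = CookieAuthentication()
    meta_info = MetaInfoCookie()

    @demo_app.get("/demo/whoami")
    async def whoami(user_id: str = Depends(cookie_auth), meta: MetaInfoSchema = Depends(meta_info)):
        # cookie_auth validates and refreshes the login token; meta_info exposes
        # project/user context read from the request cookies and headers.
        return {"user_id": user_id, "project_id": meta.project_id}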
import jwt
from jwt.exceptions import (
InvalidSignatureError,
ExpiredSignatureError,
MissingRequiredClaimError,
)
from scripts.constants.app_configuration import KeyPath
from scripts.constants.app_constants import Secrets
from scripts.exceptions import AuthenticationError, ErrorMessages
from scripts.logging.logging import logger
class JWT:
def __init__(self):
self.max_login_age = Secrets.LOCK_OUT_TIME_MINS
self.issuer = Secrets.issuer
self.alg = Secrets.alg
self.public = KeyPath.public
self.private = KeyPath.private
def encode(self, payload):
try:
logger.debug('Inside encode')
with open(self.private, "r") as f:
key = f.read()
return jwt.encode(payload, key, algorithm=self.alg)
except Exception as e:
logger.debug(f'Exception in encode: {str(e)}')
raise
def validate(self, token):
try:
logger.debug('Inside validate')
with open(self.public, "r") as f:
key = f.read()
payload = jwt.decode(
token,
key,
algorithms=self.alg,
leeway=Secrets.leeway_in_mins,
options={"require": ["exp", "iss"]},
)
return payload
except InvalidSignatureError:
raise AuthenticationError(ErrorMessages.ERROR003)
except ExpiredSignatureError:
raise AuthenticationError(ErrorMessages.ERROR002)
except MissingRequiredClaimError:
raise AuthenticationError(ErrorMessages.ERROR002)
except Exception as e:
logger.debug(f'Exception in validate: {str(e)}')
raise
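# Illustrative sketch (not part of the original file): encoding and validating a
# token with the class above. It assumes the key pair referenced by KeyPath exists
# on disk; the payload values are placeholders ("exp" and "iss" are required by validate()).
if __name__ == "__main__":
    from datetime import datetime, timedelta

    jwt_util = JWT()
    demo_payload = {
        "user_id": "user_099",
        "iss": Secrets.issuer,
        "exp": datetime.utcnow() + timedelta(minutes=Secrets.LOCK_OUT_TIME_MINS),
    }
    encoded = jwt_util.encode(demo_payload)
    print(jwt_util.validate(encoded))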
import logging
from datetime import timedelta, datetime, timezone
from functools import lru_cache, wraps
import orjson as json
from fastapi import HTTPException, Request, status
from scripts.database.mongo.ilens_configuration.collections.user import User
from scripts.database.mongo.ilens_configuration.collections.user_project import UserProject
from scripts.database.redis_connections import user_role_permissions_redis
def timed_lru_cache(seconds: int = 10, maxsize: int = 128):
def wrapper_cache(func):
func = lru_cache(maxsize=maxsize)(func)
func.lifetime = timedelta(seconds=seconds)
func.expiration = datetime.now(timezone.utc) + func.lifetime
@wraps(func)
def wrapped_func(*args, **kwargs):
if datetime.now(timezone.utc) >= func.expiration:
logging.debug("Cache Expired")
func.cache_clear()
func.expiration = datetime.now(timezone.utc) + func.lifetime
return func(*args, **kwargs)
return wrapped_func
return wrapper_cache
@timed_lru_cache(seconds=60, maxsize=1000)
def get_user_role_id(user_id, project_id):
logging.debug("Fetching user role from DB")
user_conn = User() # user collection from ilens_configuration DB
if user_role := user_conn.find_user_role_for_user_id(user_id=user_id, project_id=project_id):
return user_role["userrole"][0]
# if user not found in primary collection, check if user is in project collection
user_proj_conn = UserProject() # user_project collection from ilens_configuration DB
if user_role := user_proj_conn.find_user_role_for_user_id(user_id=user_id, project_id=project_id):
return user_role["userrole"][0]
class RBAC:
def __init__(self, entity_name: str, operation: list[str]):
self.entity_name = entity_name
self.operation = operation
def check_permissions(self, user_id: str, project_id: str) -> dict[str, bool]:
user_role_id = get_user_role_id(user_id, project_id)
if not user_role_id:
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="User role not found!")
r_key = f"{project_id}__{user_role_id}" # eg: project_100__user_role_100
user_role_rec = user_role_permissions_redis.hget(r_key, self.entity_name)
if not user_role_rec:
return {} # TODO: raise exception here
user_role_rec = json.loads(user_role_rec)
if permission_dict := {i: True for i in self.operation if user_role_rec.get(i)}:
return permission_dict
else:
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Insufficient Permission!")
def __call__(self, request: Request) -> dict[str, bool]:
user_id = request.cookies.get("userId", request.headers.get("userId"))
project_id = request.cookies.get("projectId", request.headers.get("projectId"))
return self.check_permissions(user_id=user_id, project_id=project_id)
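# Illustrative sketch (not part of the original file): guarding a FastAPI route
# with the RBAC dependency above. The entity name and operations are placeholders
# and must match the permission records stored in Redis.
if __name__ == "__main__":
    from fastapi import Depends, FastAPI

    demo_app = FastAPI()

    @demo_app.get("/demo/schedules")
    async def list_schedules(permissions: dict = Depends(RBAC(entity_name="scheduler", operation=["view"]))):
        # `permissions` contains only the requested operations the user is allowed to perform.
        return {"permissions": permissions}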
deployment:
environmentVar:
- name: MODULE_NAME
value: "main"
- name: PORT
value: "28595"
- name: MONGO_URI
valueFrom:
secretKeyRef:
name: mongo-creds
key: MONGO_URI
- name: APP_ENV
value: prod
- name: APP_NAME
value: "ilens_scheduler"
- name: SCHEDULER_THREAD
value: "120"
- name: SCHEDULER_PROCESS
value: "20"
- name: MAX_INSTANCE
value: "200"
- name: MISFIRE_SEC
value: "180"
- name: BASE_PATH
value: "/code/data"
- name: MOUNT_DIR
value: "/ilens_scheduler"
- name: REDIS_URI
value: "redis://redis-db-service.ilens-infra:6379"
- name: SECURE_ACCESS
value: "True"
- name: CORS_URLS
value: "https://qa.ilens.io,https://staging.ilens.io"
- name: SW_DOCS_URL
value: "/docs"
- name: SW_OPENAPI_URL
value: "/openapi.json"
- name: ENABLE_CORS
value: "True"
- name: SECURE_COOKIE
value: "True"
- name: VERIFY_SIGNATURE
value: "True"
- name: PROTECTED_HOSTS
value: "*.unifytwin.com,*.ilens.io"