Commit 83fa1930 authored by aakash.bedi

first commit

parent df41e89b
Pipeline #33196 failed
__pycache__/
stages:
  - auto-tagging
  - deploy
  - update

variables:
  MYSQL_CONNECTION: "mysql -h $MYSQL_HOST -u $MYSQL_USER -p$MYSQL_PASS "

before_script:
  - val=$($MYSQL_CONNECTION -N -e "SELECT COUNT(*) FROM $VERSION_DB.$DB_TABLE WHERE category='Server' AND type='Service' AND os='docker' AND module_name='$CI_PROJECT_NAME'")
  - if [ "$val" -eq 0 ]; then $MYSQL_CONNECTION -e "INSERT INTO $VERSION_DB.$DB_TABLE VALUES ('Server', 'Service', '$CI_PROJECT_NAME', 'docker', '2', '0', '0', '0')"; fi
  - QA=$($MYSQL_CONNECTION -N -e "SELECT qa FROM $VERSION_DB.$DB_TABLE WHERE module_name='$CI_PROJECT_NAME' AND type='Service' AND category='Server' AND os='docker'")
  - DEV=$($MYSQL_CONNECTION -N -e "SELECT dev FROM $VERSION_DB.$DB_TABLE WHERE module_name='$CI_PROJECT_NAME' AND type='Service' AND category='Server' AND os='docker'")
  - UAT=$($MYSQL_CONNECTION -N -e "SELECT uat FROM $VERSION_DB.$DB_TABLE WHERE module_name='$CI_PROJECT_NAME' AND type='Service' AND category='Server' AND os='docker'")
  - PROD=$($MYSQL_CONNECTION -N -e "SELECT prod FROM $VERSION_DB.$DB_TABLE WHERE module_name='$CI_PROJECT_NAME' AND type='Service' AND category='Server' AND os='docker'")
auto-tagging:
  stage: auto-tagging
  before_script:
    - val=$($MYSQL_CONNECTION -N -e "SELECT COUNT(*) FROM $VERSION_DB.$VERSION_RELEASE_TABLE WHERE module_name='$CI_PROJECT_NAME'")
    - if [ "$val" -eq 0 ]; then $MYSQL_CONNECTION -e "INSERT INTO $VERSION_DB.$VERSION_RELEASE_TABLE VALUES ('$CI_PROJECT_NAME', 'iLens', '5', '13', '0', '0')"; fi
    - ILENS=$($MYSQL_CONNECTION -N -e "SELECT ilens_version FROM $VERSION_DB.$VERSION_RELEASE_TABLE WHERE module_name='$CI_PROJECT_NAME'")
    - RELEASE=$($MYSQL_CONNECTION -N -e "SELECT release_version FROM $VERSION_DB.$VERSION_RELEASE_TABLE WHERE module_name='$CI_PROJECT_NAME'")
    - FEATURE=$($MYSQL_CONNECTION -N -e "SELECT feature_version FROM $VERSION_DB.$VERSION_RELEASE_TABLE WHERE module_name='$CI_PROJECT_NAME'")
    - PATCH=$($MYSQL_CONNECTION -N -e "SELECT patch_version FROM $VERSION_DB.$VERSION_RELEASE_TABLE WHERE module_name='$CI_PROJECT_NAME'")
  script:
    # Merge-commit titles look like: Merge branch 'QA/xyz' into 'master'; the third
    # space-separated field therefore yields the source-branch prefix.
    - SOURCE_BRANCH=$(echo $CI_COMMIT_TITLE | cut -f 3 -d " " | cut -f 1 -d "/" | cut -f 2 -d "'")
    - >
      if [ "$SOURCE_BRANCH" = "QA" ]; then
        ((RELEASE=RELEASE+1)) && FEATURE=0 && PATCH=0;
        TAG_NAME=v$ILENS.$RELEASE.$FEATURE
        IMAGE_URL="azrilensprod.azurecr.io/ilens/release/versions/v$ILENS.$RELEASE:$CI_PROJECT_NAME-$TAG_NAME"
        PROD=$RELEASE; QA=0; DEV=0;
        $MYSQL_CONNECTION -e "UPDATE $VERSION_DB.$DB_TABLE SET prod='$PROD', qa='$QA', dev='$DEV' WHERE module_name='$CI_PROJECT_NAME' AND type='Service' AND category='Server' AND os='docker'"
      elif [ "$SOURCE_BRANCH" = "feature" ]; then
        ((FEATURE=FEATURE+1)) && PATCH=0;
        TAG_NAME=v$ILENS.$RELEASE.$FEATURE
        IMAGE_URL="azrilensprod.azurecr.io/ilens/release/versions/v$ILENS.$RELEASE:$CI_PROJECT_NAME-$TAG_NAME"
      elif [ "$SOURCE_BRANCH" = "patch" ]; then
        ((PATCH=PATCH+1));
        TAG_NAME=v$ILENS.$RELEASE.$FEATURE.$PATCH
        IMAGE_URL="azrilensprod.azurecr.io/ilens/release/versions/v$ILENS.$RELEASE:$CI_PROJECT_NAME-$TAG_NAME"
      else
        exit 1
      fi
    - echo -e "\n\nImage: $IMAGE_URL" >> ReleaseNote.txt
    - sed -i "1s|^|Version: $TAG_NAME\n|" ReleaseNote.txt
    - sed -i "1s|^|Module Name: $CI_PROJECT_NAME\n|" ReleaseNote.txt
    - docker build -t $IMAGE_URL .
    - docker push $IMAGE_URL
    - docker rmi --force $IMAGE_URL
    - URL=$(echo $CI_PROJECT_URL | sed 's|https://||')
    - git remote set-url origin https://$GIT_USRNAME:$GIT_USRPASSWD@$URL
    - git config user.email "devopsilens@gmail.com"
    - git config user.name "$GIT_USRNAME"
    - git tag -a $TAG_NAME -F ReleaseNote.txt
    - git push origin $TAG_NAME
    - $MYSQL_CONNECTION -e "UPDATE $VERSION_DB.$VERSION_RELEASE_TABLE SET release_version='$RELEASE', feature_version='$FEATURE', patch_version='$PATCH' WHERE module_name='$CI_PROJECT_NAME'"
    - $MYSQL_CONNECTION -e "INSERT INTO $HISTORY_DB.$VERSION_RELEASE_TABLE VALUES ('$CI_JOB_ID', '$CI_PROJECT_NAME', 'iLens', '$ILENS.$RELEASE.$FEATURE', '$CI_COMMIT_SHA', '$GITLAB_USER_NAME', '$CI_COMMIT_REF_NAME')"
  tags:
    - shell
  only:
    - master
#~~~~~| QA K8 AKS |~~~~~#
qa-aks-deployment:
  stage: deploy
  script:
    - REGISTRY_URL=azacrknowledgelens.azurecr.io/knowledgelens/products/ilens/qa
    - export KUBECONFIG=/home/gitlab-runner/.kube/config-aks-qa
    - NAMESPACE=ilens-core
    - QA=$((QA + 1)) && DEV=0
    - docker build -t $REGISTRY_URL/$CI_PROJECT_NAME:v$PROD.$QA.$DEV .
    - docker push $REGISTRY_URL/$CI_PROJECT_NAME:v$PROD.$QA.$DEV
  only:
    - QA
  tags:
    - shell

tag-update-qa:
  stage: update
  script:
    - QA=$((QA + 1)) && DEV=0
    - REGISTRY_URL=azacrknowledgelens.azurecr.io/knowledgelens/products/ilens/qa
    - docker rmi --force $REGISTRY_URL/$CI_PROJECT_NAME:v$PROD.$QA.$DEV
    - $MYSQL_CONNECTION -e "INSERT INTO $HISTORY_DB.$DB_TABLE VALUES ('$CI_JOB_ID', 'Server', 'Service', '$CI_PROJECT_NAME', 'docker', '$PROD.$QA.$DEV', '$CI_COMMIT_SHA', '$GITLAB_USER_NAME', '$CI_COMMIT_REF_NAME')"
    - $MYSQL_CONNECTION -e "UPDATE $VERSION_DB.$DB_TABLE SET prod='$PROD', qa='$QA', dev='$DEV' WHERE module_name='$CI_PROJECT_NAME' AND type='Service' AND category='Server' AND os='docker'"
  dependencies:
    - qa-aks-deployment
  only:
    - QA
  tags:
    - shell
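
For reference, the tagging scheme the auto-tagging job implements, as a minimal Python sketch (illustrative only, not part of the commit):

# Sketch of the branch-based version bump implemented by auto-tagging above.
def bump_version(source_branch, ilens, release, feature, patch):
    if source_branch == "QA":            # release bump; feature/patch reset
        release, feature, patch = release + 1, 0, 0
        tag = f"v{ilens}.{release}.{feature}"
    elif source_branch == "feature":     # feature bump; patch reset
        feature, patch = feature + 1, 0
        tag = f"v{ilens}.{release}.{feature}"
    elif source_branch == "patch":       # patch bump; four-part tag
        patch += 1
        tag = f"v{ilens}.{release}.{feature}.{patch}"
    else:
        raise ValueError("unsupported source branch: " + source_branch)
    return tag, (release, feature, patch)

assert bump_version("QA", 5, 13, 2, 1)[0] == "v5.14.0"
assert bump_version("patch", 5, 13, 2, 1)[0] == "v5.13.2.2"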
# Default ignored files
/shelf/
/workspace.xml
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$">
      <excludeFolder url="file://$MODULE_DIR$/venv" />
    </content>
    <orderEntry type="jdk" jdkName="Python 3.9 (pipeline-neshap) (2)" jdkType="Python SDK" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
  <component name="PyDocumentationSettings">
    <option name="format" value="PLAIN" />
    <option name="myDocStringFormat" value="Plain" />
  </component>
</module>
<component name="InspectionProjectProfileManager">
  <profile version="1.0">
    <option name="myName" value="Project Default" />
    <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
      <option name="ignoredPackages">
        <value>
          <list size="2">
            <item index="0" class="java.lang.String" itemvalue="backports.zoneinfo" />
            <item index="1" class="java.lang.String" itemvalue="pandas" />
          </list>
        </value>
      </option>
    </inspection_tool>
    <inspection_tool class="PyPep8NamingInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
      <option name="ignoredErrors">
        <list>
          <option value="N801" />
          <option value="N806" />
        </list>
      </option>
    </inspection_tool>
    <inspection_tool class="PyUnresolvedReferencesInspection" enabled="true" level="WARNING" enabled_by_default="true">
      <option name="ignoredIdentifiers">
        <list>
          <option value="str.__and__" />
        </list>
      </option>
    </inspection_tool>
  </profile>
</component>
<component name="InspectionProjectProfileManager">
  <settings>
    <option name="USE_PROJECT_PROFILE" value="false" />
    <version value="1.0" />
  </settings>
</component>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.9 (pipeline-neshap) (2)" project-jdk-type="Python SDK" />
</project>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/ctcmp-golden-batch.iml" filepath="$PROJECT_DIR$/.idea/ctcmp-golden-batch.iml" />
    </modules>
  </component>
</project>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="VcsDirectoryMappings">
    <mapping directory="$PROJECT_DIR$" vcs="Git" />
  </component>
</project>
FROM python:3.8-buster
COPY . /code
WORKDIR /code
RUN pip install -r requirements.txt
CMD ["python", "app.py"]
# CTCMP Golden Batch Analytics
Release Notes:

[2021-10-27] Features:
1. Updated model file with client-recommended parameters.
2. Recommendations are now computed every 15 minutes.
3. Recommendations are now given as a range.

[Initial] Features:
1. Deployed new model "delta_v1" with an MAE of about 800.
__version__ = "V0.0.1"

from datetime import datetime, timedelta

import simplejson
from loguru import logger

from scripts.constants.app_configuration import (KAIROS_DB_HOST, data_conf, OUTPUT_PARAMETER_QUICK_MAP_INFO,
                                                 KAFKA_TOPIC, OUTPUT_SITE_ID, TARGET_VALUE)
from scripts.core.data.data_export import KafkaProducerUtil
from scripts.core.data.data_import import DataPuller
from scripts.core.recommender.parameter_optimizer import ParameterOptimization
from scripts.data_model.egress_data_model import KafkaDataModel


def get_start_time_and_end_time():
    logger.info("Finding the start and end time for the data fetch - relative recent data")
    # Time intervals: 01:00-01:15, 01:15-01:30, 01:30-01:45, 01:45-02:00
    current_time = datetime.now()
    if current_time.minute >= 45:
        # timeframe to consider: 30-45
        start_time_value = current_time.replace(minute=30, second=0, microsecond=0)
    elif current_time.minute >= 30:
        # timeframe to consider: 15-30
        start_time_value = current_time.replace(minute=15, second=0, microsecond=0)
    elif current_time.minute >= 10:
        # timeframe to consider: 00-15
        start_time_value = current_time.replace(minute=0, second=0, microsecond=0)
    else:
        # timeframe to consider: 45-00, which belongs to the previous hour
        start_time_value = current_time.replace(minute=45, second=0, microsecond=0)
        start_time_value = start_time_value - timedelta(hours=1)
    start_time = start_time_value
    end_time = start_time + timedelta(minutes=14, seconds=59)
    # Kairos expects epoch milliseconds
    start_time = start_time.timestamp() * 1000
    end_time = end_time.timestamp() * 1000
    return start_time, end_time


def recommend_best_parameters():
    logger.info("Started Recommending best Parameters")
    start_time, end_time = get_start_time_and_end_time()
    prediction_time_difference = 15 * 60 * 1000  # 15 minutes in ms
    prediction_time_difference_end = 1000 * 60 * 30  # 30 minutes in ms
    absolute_time = {"start_absolute": start_time, "end_absolute": end_time}
    logger.debug("Fetching most recent live data")
    live_data_puller = DataPuller(db_host=KAIROS_DB_HOST,
                                  data_config=data_conf,
                                  payload="live_query",
                                  absolute_time=absolute_time)
    df_input = live_data_puller.get_data()
    data_dict = df_input.to_dict(orient="records")
    logger.debug("Input Data Received")
    optimizer = ParameterOptimization()
    recommendation_flag, recommendation_result, predicted_result = optimizer.find_optimum(data_dict=data_dict,
                                                                                          controllable=True,
                                                                                          add_predict=TARGET_VALUE)
    logger.debug("Obtained optimized result from Optimizer")
    logger.debug("Output Recommendation : {}".format(simplejson.dumps(recommendation_result, allow_nan=False)))
    logger.debug("Output Prediction : {}".format(simplejson.dumps(predicted_result, allow_nan=False)))
    logger.debug("Tagging the data appropriately to be shared with Kafka/DataProcessor")
    tagged_recommendation_result = dict()
    tagged_predicted_result = dict()
    for each_parameter in OUTPUT_PARAMETER_QUICK_MAP_INFO:
        if each_parameter in recommendation_result:
            tagged_recommendation_result[OUTPUT_PARAMETER_QUICK_MAP_INFO[each_parameter]] = recommendation_result[each_parameter]
        if each_parameter in predicted_result:
            tagged_predicted_result[OUTPUT_PARAMETER_QUICK_MAP_INFO[each_parameter]] = predicted_result[each_parameter]
    logger.debug("Tagged Recommendation : {}".format(tagged_recommendation_result))
    logger.debug("Tagged Prediction : {}".format(tagged_predicted_result))
    recommendation_time = end_time + 1
    prediction_time_start = recommendation_time + prediction_time_difference
    prediction_time_end = recommendation_time + prediction_time_difference_end
    recommendation_output = KafkaDataModel(data=tagged_recommendation_result,
                                           site_id=OUTPUT_SITE_ID,
                                           timestamp=int(recommendation_time))
    logger.info("Recommendation Output : {}".format(simplejson.dumps(recommendation_output.dict(), allow_nan=False)))
    predicted_output_start = KafkaDataModel(data=tagged_predicted_result,
                                            site_id=OUTPUT_SITE_ID,
                                            timestamp=int(prediction_time_start))
    predicted_output_end = KafkaDataModel(data=tagged_predicted_result,
                                          site_id=OUTPUT_SITE_ID,
                                          timestamp=int(prediction_time_end))
    logger.info("Prediction Output Start : {}".format(simplejson.dumps(predicted_output_start.dict(), allow_nan=False)))
    logger.info("Prediction Output End : {}".format(simplejson.dumps(predicted_output_end.dict(), allow_nan=False)))
    kafka_writer = KafkaProducerUtil()
    if recommendation_flag:
        kafka_writer.publish(topic=KAFKA_TOPIC, data=simplejson.dumps(recommendation_output.dict(), allow_nan=False))
        kafka_writer.publish(topic=KAFKA_TOPIC, data=simplejson.dumps(predicted_output_start.dict(), allow_nan=False))
        kafka_writer.publish(topic=KAFKA_TOPIC, data=simplejson.dumps(predicted_output_end.dict(), allow_nan=False))
        logger.info("Outputs written to Kafka successfully!")
    else:
        logger.info("Outputs not written as the recommendation flag was false!")


if __name__ == '__main__':
    recommend_best_parameters()
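
A quick worked example of the quarter-hour window selection above (a sketch, not part of the commit; it restates the branch logic with the clock passed in so it can be checked):

# Sketch: the window selection from get_start_time_and_end_time, parameterized
# by the clock for testing (illustrative only).
from datetime import datetime, timedelta

def select_window(current_time):
    if current_time.minute >= 45:
        start = current_time.replace(minute=30, second=0, microsecond=0)
    elif current_time.minute >= 30:
        start = current_time.replace(minute=15, second=0, microsecond=0)
    elif current_time.minute >= 10:
        start = current_time.replace(minute=0, second=0, microsecond=0)
    else:
        start = current_time.replace(minute=45, second=0, microsecond=0) - timedelta(hours=1)
    return start, start + timedelta(minutes=14, seconds=59)

start, end = select_window(datetime(2021, 10, 27, 2, 50))
assert (start.hour, start.minute) == (2, 30)   # current hour's 30-45 slot
start, end = select_window(datetime(2021, 10, 27, 2, 5))
assert (start.hour, start.minute) == (1, 45)   # previous hour's 45-00 slot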
[KAIROS_DB]
uri=$KAIROS_URI
[KAFKA]
kafka_host=$KAFKA_HOST
kafka_port=$KAFKA_PORT
kafka_topic=$KAFKA_TOPIC
[POSTGRES]
postgres_uri=$POSTGRES_URI
device_tag,column,importances,min,max,mean,std_dev,range_min,range_max,golden_batch_minimum,golden_batch_maximum,operating_min,operating_max,module,model_version
PI_R7204_2,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Pressure 2,19.2,0,1,0.4,0.1,0.3,0.5,0,1,0,1,Golden Batch - CTCMP,latest
TI_R7204_3,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 1,12.1,139,204,170.7,9.4,156.6,184.8,156,185,156,185,Golden Batch - CTCMP,latest
TIC_R7204_1,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 2,3.3,171,194,181.3,4.0,175.3,187.3,175,188,175,188,Golden Batch - CTCMP,latest
FI_R7204_2,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 3,8.7,4,368,198.1,67.8,96.4,299.8,96,300,96,300,Golden Batch - CTCMP,latest
FI_R7204_4,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 4,25.3,50,129,92.1,12.2,73.8,110.3,73,111,73,111,Golden Batch - CTCMP,latest
FI_R7204_5,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 5,8.6,17,241,140.4,40.3,79.9,200.9,79,201,79,201,Golden Batch - CTCMP,latest
AI_R7204,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Chlorine,20.6,-1,0,-0.9,0.0,-1.0,-0.9,-1,0,-1,0,Golden Batch - CTCMP,latest
RATIO,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Ratio R7204,2.2,-31,9,3.6,1.0,2.1,5.2,2,6,2,6,Golden Batch - CTCMP,latest
KAIROS_URI=https://iLens:iLensJUB$456@jubilant.ilens.io/kairos
KAFKA_HOST=192.168.0.220
KAFKA_PORT=9092
KAFKA_TOPIC=ilens_dev
POSTGRES_URI=postgresql://iLens:iLensJUB$456@jubilant.ilens.io/kairos
Tag ID,Tag Name,Site,Plant,Line,Equipment,Parameter Name
site_114$dept_129$line_326$equipment_3903$tag_4831,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 5,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Flow 5
site_114$dept_129$line_326$equipment_3903$tag_4648,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow CV,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Flow CV
site_114$dept_129$line_326$equipment_3903$tag_4692,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Pressure 2,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Pressure 2
site_114$dept_129$line_326$equipment_3903$tag_4703,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 1,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Temperature 1
site_114$dept_129$line_326$equipment_3903$tag_4704,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 2,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Temperature 2
site_114$dept_129$line_326$equipment_3903$tag_4705,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 3,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Temperature 3
site_114$dept_129$line_326$equipment_3903$tag_4828,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 2,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Flow 2
site_114$dept_129$line_326$equipment_3903$tag_4829,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 3,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Flow 3
site_114$dept_129$line_326$equipment_3903$tag_4830,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 4,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Flow 4
site_114$dept_129$line_326$equipment_3903$tag_4862,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Chlorine,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Chlorine
site_114$dept_129$line_326$equipment_3903$tag_4688,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Running status,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Running status
site_114$dept_129$line_326$equipment_3903$tag_4647,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature CV,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Temperature CV
site_114$dept_129$line_326$equipment_3903$tag_5153,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Ratio R7204,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Ratio R7204
site_114$dept_129$line_326$equipment_3903$tag_4570,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Temperature
parameters,raw_parameter,lower_parameter,upper_parameter,original_parameter
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Pressure 2,Pressure 2,ai_yield_Pressure 2_lower,ai_yield_Pressure 2_upper,ai_yield_Pressure 2_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 1,Temperature 1,ai_yield_Temperature 1_lower,ai_yield_Temperature 1_upper,ai_yield_Temperature 1_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 2,Temperature 2,ai_yield_Temperature 2_lower,ai_yield_Temperature 2_upper,ai_yield_Temperature 2_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 3,Flow 3,ai_yield_Flow 3_lower,ai_yield_Flow 3_upper,ai_yield_Flow 3_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 4,Flow 4,ai_yield_Flow 4_lower,ai_yield_Flow 4_upper,ai_yield_Flow 4_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 5,Flow 5,ai_yield_Flow 5_lower,ai_yield_Flow 5_upper,ai_yield_Flow 5_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Chlorine,Chlorine,ai_yield_Chlorine_lower,ai_yield_Chlorine_upper,ai_yield_Chlorine_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Ratio R7204,Ratio R7204,ai_yield_Ratio R7204_lower,ai_yield_Ratio R7204_upper,ai_yield_Ratio R7204_original
APP_NAME=ebpr_report_engine
MONGO_URI=mongodb://192.168.0.220:2717/
KAIROS_URI=http://192.168.0.220:8080
MQTT_URL=192.168.0.220
MQTT_PORT=1883
SCHEDULER_PROXY=http://192.168.0.220/ilens-scheduler
ILENS_META_SERVICE_URL=http://192.168.0.220/ilens_api
EBPR_DATA_ENGINE_PROXY=http://192.168.0.220/formde
EBPR_PROXY=http://192.168.0.220/ebpr
WORKFLOW_MGMT_PROXY=http://jubilant.ilens.io/workflow-mt
REDIS_HOST=192.168.0.220
REDIS_PORT=6379
REDIS_BROKER_URI_CELERY=redis://192.168.0.220:6379/10
SECURITY_IP_CHECK=false
SECURITY_USER_CHECK=true
SECURITY_AGENT_CHECK=true
LOG_LEVEL=INFO
LOG_TRACEBACK=true
REPORT_DIRECTORY=reports
BASE_DIRECTORY=data
EBPR_REPORT_PROXY=http://192.168.0.220/ebpr_reports
ENABLE_CELERY_WORKER=true
HUB_NAME=
CONNECTION_STRING=
API_VERSION=
MOBILE_PUSH_NOTIFICATION=false
KAFKA_HOST=192.168.0.220
KAFKA_PORT=9092
KAFKA_TOPIC=ilens_dev
START_DATE=1627776020
END_DATE=1628035220
device_tag,column,importances,min,max,mean,std_dev,range_min,range_max,golden_batch_minimum,golden_batch_maximum,operating_min,operating_max,module,model_version,rec_lower,rec_upper,rec_original,rec_status
PI_R7204_2,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Pressure 2,19.2,0,1,0.4,0.1,0.3,0.5,0,1,0,1,Golden Batch - CTCMP,latest,0,1,0.43,Operator to make changes based on recommendations!
TI_R7204_3,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 1,12.1,139,204,170.7,9.4,156.6,184.8,156,185,156,185,Golden Batch - CTCMP,latest,165,168,166.54,Operator to make changes based on recommendations!
TIC_R7204_1,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 2,3.3,171,194,181.3,4.0,175.3,187.3,175,188,175,188,Golden Batch - CTCMP,latest,186,188,186.81,Operator to make changes based on recommendations!
FI_R7204_2,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 3,8.7,4,368,198.1,67.8,96.4,299.8,96,300,96,300,Golden Batch - CTCMP,latest,96,127,104.17,Operator to make changes based on recommendations!
FI_R7204_4,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 4,25.3,50,129,92.1,12.2,73.8,110.3,73,111,73,111,Golden Batch - CTCMP,latest,87,92,89.45,Operator to make changes based on recommendations!
FI_R7204_5,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 5,8.6,17,241,140.4,40.3,79.9,200.9,79,201,79,201,Golden Batch - CTCMP,latest,171,201,199.57,Operator to make changes based on recommendations!
AI_R7204,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Chlorine,20.6,-1,0,-0.9,0.0,-1.0,-0.9,-1,0,-1,0,Golden Batch - CTCMP,latest,-1,0,-0.33,Operator to make changes based on recommendations!
RATIO,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Ratio R7204,2.2,-31,9,3.6,1.0,2.1,5.2,2,6,2,6,Golden Batch - CTCMP,latest,2,4,3.25,Operator to make changes based on recommendations!
flavors:
  python_function:
    env: conda.yaml
    loader_module: mlflow.sklearn
    model_path: model.pkl
    python_version: 3.8.12
  sklearn:
    pickled_model: model.pkl
    serialization_format: cloudpickle
    sklearn_version: 1.0.2
utc_time_created: '2022-03-31 08:38:33.992936'
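
Given the two flavors declared above, the artifact loads through either MLflow API (a sketch; the local directory name is an assumption):

# Sketch: loading this artifact via its declared flavors. "model" is a
# hypothetical local path to the directory containing this MLmodel file.
import mlflow.pyfunc
import mlflow.sklearn

model_dir = "model"
pyfunc_model = mlflow.pyfunc.load_model(model_dir)  # generic python_function flavor
sk_model = mlflow.sklearn.load_model(model_dir)     # native sklearn flavor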
channels:
  - conda-forge
dependencies:
  - python=3.8.12
  - pip
  - pip:
      - mlflow
      - cloudpickle==2.0.0
      - psutil==5.9.0
      - scikit-learn==1.0.2
      - typing-extensions==4.1.1
name: mlflow-env
mlflow
cloudpickle==2.0.0
psutil==5.9.0
scikit-learn==1.0.2
typing-extensions==4.1.1
columns,importances
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 4,25.34742183515991
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Chlorine,20.56310571518254
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Pressure 2,19.191097470789952
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 1,12.090429856362187
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 3,8.715560671364255
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 5,8.626076494006318
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 2,3.259925613054504
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Ratio R7204,2.20638234408034
parameters,raw_parameter,lower_parameter,upper_parameter,original_parameter
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Pressure 2,Pressure 2,ai_yield_Pressure 2_lower,ai_yield_Pressure 2_upper,ai_yield_Pressure 2_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 1,Temperature 1,ai_yield_Temperature 1_lower,ai_yield_Temperature 1_upper,ai_yield_Temperature 1_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 2,Temperature 2,ai_yield_Temperature 2_lower,ai_yield_Temperature 2_upper,ai_yield_Temperature 2_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 3,Flow 3,ai_yield_Flow 3_lower,ai_yield_Flow 3_upper,ai_yield_Flow 3_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 4,Flow 4,ai_yield_Flow 4_lower,ai_yield_Flow 4_upper,ai_yield_Flow 4_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 5,Flow 5,ai_yield_Flow 5_lower,ai_yield_Flow 5_upper,ai_yield_Flow 5_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Chlorine,Chlorine,ai_yield_Chlorine_lower,ai_yield_Chlorine_upper,ai_yield_Chlorine_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Ratio R7204,Ratio R7204,ai_yield_Ratio R7204_lower,ai_yield_Ratio R7204_upper,ai_yield_Ratio R7204_original
pytz==2021.3
loguru==0.5.3
scipy==1.7.1
numpy==1.21.2
pandas==1.3.3
mlflow==1.20.2
scikit-learn
simplejson==3.17.5
requests==2.26.0
pydantic==1.8.2
python-dotenv==0.19.2
PyYAML==6.0
kafka-python==1.4.7
SQLAlchemy==1.3.20
sqlparse==0.4.2
psycopg2==2.9.1
from loguru import logger

from scripts.core.data.data_import import DataPuller
from scripts.constants.app_configuration import KAIROS_DB_HOST, data_conf, training_data


def training_data_generator():
    logger.info("Fetching the training data!")
    training_data_puller = DataPuller(db_host=KAIROS_DB_HOST, data_config=data_conf, payload="train_query")
    logger.debug("Pulling the training data from Kairos")
    df_training_data = training_data_puller.get_data()
    logger.debug("Attempting to save the loaded data!")
    df_training_data.to_csv(training_data, index=False)
    logger.info("Training Data has been saved successfully!")


if __name__ == '__main__':
    # Create the training data!
    training_data_generator()
from dotenv import load_dotenv
import os
import sys
from configparser import ConfigParser, BasicInterpolation
import yaml

# Configuration File Constants
_application_conf = "./conf/application.conf"
_default_conf = "./config.env"
data_conf = "./conf/data.yml"
training_data = "./data/training.csv"

load_dotenv(dotenv_path=_default_conf)


class EnvInterpolation(BasicInterpolation):
    """
    Interpolation which expands environment variables in values.
    """

    def before_get(self, parser, section, option, value, defaults):
        value = super().before_get(parser, section, option, value, defaults)
        if not os.path.expandvars(value).startswith("$"):
            return os.path.expandvars(value)
        else:
            return


try:
    config = ConfigParser(interpolation=EnvInterpolation())
    config.read(_application_conf)
except Exception as e:
    print(f"Error while loading the config: {e}")
    print("Failed to Load Configuration. Exiting!!!")
    sys.exit()


class Logging:
    level = config.get("LOGGING", "level", fallback="INFO")
    level = level if level else "INFO"
    tb_flag = config.getboolean("LOGGING", "traceback", fallback=True)
    tb_flag = tb_flag if tb_flag is not None else True


# Configuration Variables
# Kairos Configuration Variables
KAIROS_DB_HOST = config["KAIROS_DB"]["uri"]
# Postgres Configuration Variables
POSTGRES_URI = config["POSTGRES"]["postgres_uri"]
# Kafka Configuration Variables
KAFKA_HOST = config["KAFKA"]["kafka_host"]
KAFKA_PORT = config["KAFKA"]["kafka_port"]
KAFKA_TOPIC = config["KAFKA"]["kafka_topic"]

# Read the configuration file
with open(data_conf, "r") as _cf:
    _config = yaml.full_load(_cf)

MODEL = _config["model"]
PARAMETER_INFO = _config["parameter"]
PARAMETER_INPUT = MODEL["model_input_features"]
OUTPUT_DATA = _config["output"]
OUTPUT_PARAMETER_QUICK_MAP_INFO = OUTPUT_DATA["quick_map"]
TARGET_VALUE = OUTPUT_DATA["target"]
OUTPUT_SITE_ID = OUTPUT_DATA["site"]
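
For illustration, this is what EnvInterpolation does when an INI value references an environment variable (a minimal sketch, not part of the commit; it assumes scripts.constants.app_configuration is importable):

# Sketch: EnvInterpolation expands $VARS from the environment while the INI
# value is read; values left unexpanded (still starting with "$") become None.
import os
from configparser import ConfigParser
from scripts.constants.app_configuration import EnvInterpolation  # defined above

os.environ["KAIROS_URI"] = "http://192.168.0.220:8080"  # normally loaded from config.env
parser = ConfigParser(interpolation=EnvInterpolation())
parser.read_string("[KAIROS_DB]\nuri=$KAIROS_URI\n")
assert parser["KAIROS_DB"]["uri"] == "http://192.168.0.220:8080"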
from kafka import KafkaProducer
from loguru import logger

from scripts.constants.app_configuration import KAFKA_HOST, KAFKA_PORT


class KafkaProducerUtil:
    def __init__(self):
        try:
            self.host = KAFKA_HOST
            self.port = KAFKA_PORT
            logger.debug(f"Connecting to Kafka with details: {self.host}, {self.port}")
            kafka_broker = [self.host + ":" + str(self.port)]
            self.producer = KafkaProducer(
                bootstrap_servers=kafka_broker,
                value_serializer=lambda v: v.encode('utf-8'),
                api_version=(0, 10, 1))
            self.producer.flush()
        except Exception as e:
            logger.error(f"Kafka connection error: {e}")

    def publish(self, topic, data):
        try:
            kafka_response = self.producer.send(topic, data)
            self.producer.flush()
            logger.debug(f"Message sent to Kafka with response: {kafka_response}")
            return True
        except Exception as e:
            logger.error(e)
            return False
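
A minimal usage sketch for the producer above (payload is illustrative; the topic name matches config.env):

# Sketch: publishing a JSON payload through KafkaProducerUtil (illustrative).
import simplejson
from scripts.core.data.data_export import KafkaProducerUtil

producer = KafkaProducerUtil()
payload = simplejson.dumps({"site_id": "site_114", "data": {}}, allow_nan=False)
ok = producer.publish(topic="ilens_dev", data=payload)
print("published:", ok)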
import json

import pandas as pd
import requests
import yaml
from loguru import logger


class DataPuller(object):
    def __init__(self, db_host, data_config, payload, absolute_time=None, optional_payload=None):
        _config_file = data_config
        with open(_config_file, 'r') as _cf:
            self.conf = yaml.full_load(_cf)
        self.db_host_url = db_host
        self.request_url = "{kairos_host}/api/v1/datapoints/query".format(kairos_host=self.db_host_url)
        self.payload = self.conf[payload]
        self.column_rename = self.conf["column_renamer"]
        if absolute_time is not None:
            # Absolute windows replace any relative window from the config
            if "start_relative" in self.payload:
                del self.payload["start_relative"]
            if "end_relative" in self.payload:
                del self.payload["end_relative"]
            self.payload["start_absolute"] = absolute_time["start_absolute"]
            self.payload["end_absolute"] = absolute_time["end_absolute"]
        self.payload = json.dumps(self.payload)

    def get_data(self):
        logger.info("Data for the parameters being pulled from the Kairos database")
        response_data = requests.post(url=self.request_url, data=self.payload).json()
        output_data = response_data["queries"]
        logger.debug("Data pull complete")
        df_final = pd.DataFrame()
        for i in range(len(output_data)):
            grouped_output_data = output_data[i]["results"]
            for each_grouped_data in grouped_output_data:
                value = each_grouped_data["values"]
                tag_id = each_grouped_data["group_by"][0]["group"]["c3"]
                try:
                    column_name = self.column_rename[tag_id]
                    logger.debug("Renamed {} to {} in Data".format(tag_id, column_name))
                except KeyError:
                    logger.debug("Column Renaming Logic not found for {}".format(tag_id))
                    column_name = tag_id
                df_column_data = pd.DataFrame(data=value, columns=["timestamp", column_name])
                if df_final.empty:
                    df_final = df_column_data
                else:
                    df_final = df_final.merge(df_column_data, how="outer", left_on="timestamp", right_on="timestamp")
        df_final["epochtime"] = df_final["timestamp"]
        df_final["timestamp"] = pd.to_datetime(df_final['timestamp'], unit="ms").dt.tz_localize('UTC').dt.tz_convert(
            'Asia/Kolkata')
        logger.debug("Final number of columns : {}".format(str(len(list(df_final.columns)))))
        return df_final
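
The conf/data.yml this class reads is collapsed in this diff; from how get_data() parses the response, its query entries presumably look roughly like this (an assumption reconstructed from the code; the metric name is hypothetical, the tag id is taken from the committed ilens_tags.csv):

# Assumed shape of conf/data.yml entries (illustrative, not the real file).
# get_data() relies on a KairosDB group_by over tag key "c3", whose per-group
# value it reads back from response["queries"][i]["results"].
live_query_example = {
    "start_relative": {"value": 15, "unit": "minutes"},
    "metrics": [{
        "name": "ilens.live_data",  # hypothetical metric name
        "group_by": [{"name": "tag", "tags": ["c3"]}],
    }],
}
column_renamer_example = {  # tag id -> model feature name
    "site_114$dept_129$line_326$equipment_3903$tag_4692":
        "Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Pressure 2",
}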
from loguru import logger

from scripts.core.data.data_import import DataPuller
from scripts.constants.app_configuration import KAIROS_DB_HOST, data_conf, training_data


def training_data_generator():
    logger.info("Fetching the training data!")
    training_data_puller = DataPuller(db_host=KAIROS_DB_HOST, data_config=data_conf, payload="train_query")
    logger.debug("Pulling the training data from Kairos")
    df_training_data = training_data_puller.get_data()
    logger.debug("Attempting to save the loaded data!")
    df_training_data.to_csv(training_data, index=False)
    logger.info("Training Data has been saved successfully!")


# if __name__ == '__main__':
#     # Create the training data!
#     training_data_generator()
import mlflow.sklearn
from loguru import logger


class ModelLoader(object):
    def __init__(self, model_info):
        self.model_info = model_info

    def load_model(self):
        logger.info("Loading the Model")
        if self.model_info["type"] == "mlflow.sklearn":
            return self._load_mlflow_sklearn_model()
        else:
            logger.info("Unsupported Model Type")

    def _load_mlflow_sklearn_model(self):
        try:
            _model = mlflow.sklearn.load_model(self.model_info["path"])
            logger.debug("Model loaded successfully from {}".format(self.model_info["path"]))
            return _model
        except Exception as e:
            logger.error("Error while loading mlflow.sklearn model : {}".format(str(e)))
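
Usage sketch for the loader above (the model_info keys match what load_model checks; the path is an assumption):

# Sketch: loading the committed golden-batch model (path is hypothetical).
loader = ModelLoader(model_info={"type": "mlflow.sklearn", "path": "model"})
model = loader.load_model()
if model is not None:
    print(type(model))  # the unpickled scikit-learn estimator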
from typing import Any, Dict

from pydantic import BaseModel


class AzureDataModel(BaseModel):
    blob_name: str
    data: Any


class KafkaDataModel(BaseModel):
    data: Dict[str, Any]
    site_id: str
    gw_id: str = ''
    pd_id: str = ''
    timestamp: int
    msg_id: int = 0
    partition: str = ''
    retain_flag: bool = False
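
For reference, the envelope app.py builds from this model (a sketch; the data keys follow the committed model_op_tags.csv naming, the timestamp is illustrative):

# Sketch: a KafkaDataModel envelope as app.py publishes it (illustrative values).
sample = KafkaDataModel(data={"ai_yield_Pressure 2_lower": 0, "ai_yield_Pressure 2_upper": 1},
                        site_id="site_114",
                        timestamp=1635324300000)
print(sample.dict())
# -> {'data': {...}, 'site_id': 'site_114', 'gw_id': '', 'pd_id': '',
#     'timestamp': 1635324300000, 'msg_id': 0, 'partition': '', 'retain_flag': False}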
import json

import pandas as pd

file_path = "../../data/ilens_tags.csv"
op_file_path = "../../data/model_op_tags.csv"


def get_keys():
    df = pd.read_csv(file_path)
    selected_data = {
        "Site": ['Bharuch-Unit 2'],
        "Plant": ['CTCMP'],
        "Line": ['Stage IIA - Chlorination & Neutralization-I'],
        "Equipment": ['Reactor R-7204']
    }
    for each_level in selected_data:
        df = df.loc[df[each_level].isin(selected_data[each_level])]
    final_data = df.to_dict(orient="records")
    record_data = dict()
    for each_data in final_data:
        record_data[each_data["Tag ID"]] = each_data["Tag Name"]
    print("******************************************")
    print(json.dumps(record_data))
    print("******************************************")
    print("******************************************")
    print(json.dumps(list(record_data.keys())))
    print("******************************************")


def get_op_tags():
    df = pd.read_csv(op_file_path)
    final_data = df.to_dict(orient="records")
    record_data = dict()
    for each_data in final_data:
        for range_key in ['lower', 'upper', 'original']:
            key = f"{each_data['parameters']}_{range_key}"
            # NOTE: expects "<range>_parameter_SITE_id" columns, which the
            # committed model_op_tags.csv does not provide.
            record_data[key] = each_data[f"{range_key}_parameter_SITE_id"]
    print("******************************************")
    print(json.dumps(record_data))
    print("******************************************")


if __name__ == '__main__':
    # get_keys()
    get_op_tags()