Commit 83fa1930 authored by aakash.bedi's avatar aakash.bedi

first commit

parent df41e89b
Pipeline #33196 failed with stage
__pycache__/
\ No newline at end of file
stages:
  - auto-tagging
  - deploy
  - update

variables:
  # Trailing space is intentional: callers append mysql flags/queries directly.
  MYSQL_CONNECTION: "mysql -h $MYSQL_HOST -u $MYSQL_USER -p$MYSQL_PASS "

before_script:
  # Seed the per-module version row on first run, then read current counters.
  # -N suppresses the column header so the raw value comes back directly
  # (the original piped the headed output through echo|cut to drop it).
  - val=$($MYSQL_CONNECTION -N -e "SELECT COUNT(*) FROM $VERSION_DB.$DB_TABLE WHERE category='Server' AND type='Service' AND os='docker' AND module_name='$CI_PROJECT_NAME'")
  - if [ "$val" = "0" ]; then $MYSQL_CONNECTION -e "INSERT INTO $VERSION_DB.$DB_TABLE values('Server','Service','$CI_PROJECT_NAME','docker', '2', '0', '0', '0')"; fi
  - QA=$($MYSQL_CONNECTION -N -e "SELECT qa FROM $VERSION_DB.$DB_TABLE WHERE module_name = '$CI_PROJECT_NAME' AND type = 'Service' AND category = 'Server' AND os = 'docker'")
  - DEV=$($MYSQL_CONNECTION -N -e "SELECT dev FROM $VERSION_DB.$DB_TABLE WHERE module_name = '$CI_PROJECT_NAME' AND type = 'Service' AND category = 'Server' AND os = 'docker'")
  # Was a duplicated inline mysql command; use the shared $MYSQL_CONNECTION.
  - UAT=$($MYSQL_CONNECTION -N -e "SELECT uat FROM $VERSION_DB.$DB_TABLE WHERE module_name = '$CI_PROJECT_NAME' AND type = 'Service' AND category = 'Server' AND os = 'docker'")
  - PROD=$($MYSQL_CONNECTION -N -e "SELECT prod FROM $VERSION_DB.$DB_TABLE WHERE module_name = '$CI_PROJECT_NAME' AND type = 'Service' AND category = 'Server' AND os = 'docker'")

auto-tagging:
  stage: auto-tagging
  before_script:
    - val=$($MYSQL_CONNECTION -N -e "SELECT COUNT(*) FROM $VERSION_DB.$VERSION_RELEASE_TABLE WHERE module_name='$CI_PROJECT_NAME'")
    - if [ "$val" = "0" ]; then $MYSQL_CONNECTION -e "INSERT INTO $VERSION_DB.$VERSION_RELEASE_TABLE values('$CI_PROJECT_NAME', 'iLens', '5', '13', '0', '0')"; fi
    # Keep the table reference inside the double-quoted query; the original
    # closed and reopened the quotes around $VERSION_DB.$VERSION_RELEASE_TABLE,
    # which only worked through accidental shell string concatenation.
    - ILENS=$($MYSQL_CONNECTION -N -e "SELECT ilens_version FROM $VERSION_DB.$VERSION_RELEASE_TABLE WHERE module_name = '$CI_PROJECT_NAME'")
    - RELEASE=$($MYSQL_CONNECTION -N -e "SELECT release_version FROM $VERSION_DB.$VERSION_RELEASE_TABLE WHERE module_name = '$CI_PROJECT_NAME'")
    - FEATURE=$($MYSQL_CONNECTION -N -e "SELECT feature_version FROM $VERSION_DB.$VERSION_RELEASE_TABLE WHERE module_name = '$CI_PROJECT_NAME'")
    - PATCH=$($MYSQL_CONNECTION -N -e "SELECT patch_version FROM $VERSION_DB.$VERSION_RELEASE_TABLE WHERE module_name = '$CI_PROJECT_NAME'")
  script:
    # Merge-commit title is expected to look like: Merge branch 'QA/...' into master
    - SOURCE_BRANCH=$(echo "$CI_COMMIT_TITLE" | cut -f 3 -d " " | cut -f 1 -d "/" | cut -f 2 -d "'")
    # Literal block (|) so each shell line keeps its newline; a folded scalar
    # would join the if/elif lines into one invalid command.
    - |
      if [ "$SOURCE_BRANCH" = "QA" ]; then
        RELEASE=$((RELEASE + 1)); FEATURE=0; PATCH=0
        TAG_NAME=v$ILENS.$RELEASE.$FEATURE
        IMAGE_URL="azrilensprod.azurecr.io/ilens/release/versions/v$ILENS.$RELEASE:$CI_PROJECT_NAME-$TAG_NAME"
        PROD=$RELEASE; QA=0; DEV=0
        $MYSQL_CONNECTION -e "UPDATE $VERSION_DB.$DB_TABLE SET prod='$PROD' ,qa='$QA', dev='$DEV' WHERE module_name='$CI_PROJECT_NAME' AND type='Service' AND category='Server' AND os='docker'"
      elif [ "$SOURCE_BRANCH" = "feature" ]; then
        FEATURE=$((FEATURE + 1)); PATCH=0
        TAG_NAME=v$ILENS.$RELEASE.$FEATURE
        IMAGE_URL="azrilensprod.azurecr.io/ilens/release/versions/v$ILENS.$RELEASE:$CI_PROJECT_NAME-$TAG_NAME"
      elif [ "$SOURCE_BRANCH" = "patch" ]; then
        PATCH=$((PATCH + 1))
        TAG_NAME=v$ILENS.$RELEASE.$FEATURE.$PATCH
        IMAGE_URL="azrilensprod.azurecr.io/ilens/release/versions/v$ILENS.$RELEASE:$CI_PROJECT_NAME-$TAG_NAME"
      else
        # Unknown source branch: fail the job instead of tagging blindly.
        exit 1
      fi
    - echo -e "\n\nImage:" $IMAGE_URL >> ReleaseNote.txt
    # Prepend header lines (the original built the same sed program through
    # confusing quote concatenation around the colon).
    - sed -i "1s|^|Version: $TAG_NAME\n|" ReleaseNote.txt
    - sed -i "1s|^|Module Name: $CI_PROJECT_NAME\n|" ReleaseNote.txt
    - docker build -t "$IMAGE_URL" .
    - docker push "$IMAGE_URL"
    - docker rmi --force "$IMAGE_URL"
    - URL=$(echo "$CI_PROJECT_URL" | sed 's|https://||')
    - git remote set-url origin "https://$GIT_USRNAME:$GIT_USRPASSWD@$URL"
    - git config user.email "devopsilens@gmail.com"
    - git config user.name "$GIT_USRNAME"
    - git tag -a "$TAG_NAME" -F ReleaseNote.txt
    - git push origin "$TAG_NAME"
    - $MYSQL_CONNECTION -e "UPDATE $VERSION_DB.$VERSION_RELEASE_TABLE SET release_version='$RELEASE', feature_version='$FEATURE', patch_version='$PATCH' WHERE module_name = '$CI_PROJECT_NAME' "
    - $MYSQL_CONNECTION -e "INSERT INTO $HISTORY_DB.$VERSION_RELEASE_TABLE values('$CI_JOB_ID', '$CI_PROJECT_NAME','iLens', '$ILENS.$RELEASE.$FEATURE', '$CI_COMMIT_SHA', '$GITLAB_USER_NAME', '$CI_COMMIT_REF_NAME')"
  tags:
    - shell
  only:
    - master

#~~~~~| QA K8 AKS |~~~~~#
qa-aks-deployment:
  stage: deploy
  script:
    - REGISTRY_URL=azacrknowledgelens.azurecr.io/knowledgelens/products/ilens/qa
    - export KUBECONFIG=/home/gitlab-runner/.kube/config-aks-qa
    - NAMESPACE=ilens-core
    # Bump the QA counter locally for this build; it is persisted to the DB by
    # the tag-update-qa job in the update stage.
    - QA=$((QA + 1)) && DEV=0
    - docker build -t "$REGISTRY_URL/$CI_PROJECT_NAME:v$PROD.$QA.$DEV" .
    - docker push "$REGISTRY_URL/$CI_PROJECT_NAME:v$PROD.$QA.$DEV"
  only:
    - QA
  tags:
    - shell

tag-update-qa:
  stage: update
  script:
    # Recompute the same tag as qa-aks-deployment (jobs do not share shell state).
    - QA=$((QA + 1)) && DEV=0
    - REGISTRY_URL=azacrknowledgelens.azurecr.io/knowledgelens/products/ilens/qa
    - docker rmi --force "$REGISTRY_URL/$CI_PROJECT_NAME:v$PROD.$QA.$DEV"
    - $MYSQL_CONNECTION -e "INSERT INTO $HISTORY_DB.$DB_TABLE values('$CI_JOB_ID','Server','Service', '$CI_PROJECT_NAME','docker', '$PROD.$QA.$DEV', '$CI_COMMIT_SHA', '$GITLAB_USER_NAME', '$CI_COMMIT_REF_NAME')"
    - $MYSQL_CONNECTION -e "UPDATE $VERSION_DB.$DB_TABLE SET prod='$PROD' ,qa='$QA', dev='$DEV' WHERE module_name = '$CI_PROJECT_NAME' AND type = 'Service' AND category = 'Server' AND os = 'docker'"
  dependencies:
    - qa-aks-deployment
  only:
    - QA
  tags:
    - shell
# Default ignored files
/shelf/
/workspace.xml
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$">
<excludeFolder url="file://$MODULE_DIR$/venv" />
</content>
<orderEntry type="jdk" jdkName="Python 3.9 (pipeline-neshap) (2)" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
<component name="PyDocumentationSettings">
<option name="format" value="PLAIN" />
<option name="myDocStringFormat" value="Plain" />
</component>
</module>
\ No newline at end of file
<component name="InspectionProjectProfileManager">
<profile version="1.0">
<option name="myName" value="Project Default" />
<inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
<option name="ignoredPackages">
<value>
<list size="2">
<item index="0" class="java.lang.String" itemvalue="backports.zoneinfo" />
<item index="1" class="java.lang.String" itemvalue="pandas" />
</list>
</value>
</option>
</inspection_tool>
<inspection_tool class="PyPep8NamingInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
<option name="ignoredErrors">
<list>
<option value="N801" />
<option value="N806" />
</list>
</option>
</inspection_tool>
<inspection_tool class="PyUnresolvedReferencesInspection" enabled="true" level="WARNING" enabled_by_default="true">
<option name="ignoredIdentifiers">
<list>
<option value="str.__and__" />
</list>
</option>
</inspection_tool>
</profile>
</component>
\ No newline at end of file
<component name="InspectionProjectProfileManager">
<settings>
<option name="USE_PROJECT_PROFILE" value="false" />
<version value="1.0" />
</settings>
</component>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.9 (pipeline-neshap) (2)" project-jdk-type="Python SDK" />
</project>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/ctcmp-golden-batch.iml" filepath="$PROJECT_DIR$/.idea/ctcmp-golden-batch.iml" />
</modules>
</component>
</project>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$" vcs="Git" />
</component>
</project>
\ No newline at end of file
FROM python:3.8-buster

WORKDIR /code

# Install dependencies before copying the source so code-only edits do not
# invalidate the pip layer cache; --no-cache-dir keeps the image smaller.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

CMD [ "python", "app.py" ]
\ No newline at end of file
# CTCMP Golden Batch Analytics
\ No newline at end of file
Release Notes:
[2021-10-27] Features:
1. Updated model file with client recommended parameters.
2. Computing every 15 minutes now.
3. Recommendations are in a range now.
[Initial] Features:
1. New model "delta_v1" with an MAE of about 800 being deployed.
\ No newline at end of file
__version__ = "V0.0.1"
from loguru import logger
from datetime import datetime, timedelta
from scripts.core.data.data_import import DataPuller
from scripts.constants.app_configuration import KAIROS_DB_HOST, data_conf, OUTPUT_PARAMETER_QUICK_MAP_INFO, KAFKA_TOPIC, OUTPUT_SITE_ID, TARGET_VALUE
from scripts.core.recommender.parameter_optimizer import ParameterOptimization
import simplejson
from scripts.data_model.egress_data_model import KafkaDataModel
from scripts.core.data.data_export import KafkaProducerUtil
def get_start_time_and_end_time() -> tuple:
    """Compute the data-fetch window as epoch-millisecond timestamps.

    The day is treated as fixed quarter-hour slots (:00-:15, :15-:30,
    :30-:45, :45-:00). A slot is selected from the current wall-clock
    minute, then the whole window is shifted back by one hour, so the
    returned window always spans exactly 14 min 59 s.

    Returns:
        tuple: ``(start_time, end_time)`` as float epoch timestamps in
        milliseconds (``datetime.timestamp() * 1000``).
    """
    logger.info("Finding the start and end time for the data fetch - relative recent data")
    # Time Intervals
    # 01:00-01:15, 01:15-01:30, 01:30-01:45, 01:45-02:00
    current_time = datetime.now()
    if current_time.minute >= 45:
        # timeframe to consider 30-45
        start_time_value = current_time.replace(minute=30, second=0, microsecond=0)
    elif current_time.minute >= 30:
        # timeframe to consider 15-30
        start_time_value = current_time.replace(minute=15, second=0, microsecond=0)
    elif current_time.minute >= 10:
        # timeframe to consider 00-15
        # NOTE(review): threshold is 10 rather than 15 — presumably a grace
        # period so the :00-:15 slot is only used after its data has landed;
        # confirm the intent with the data-ingestion schedule.
        start_time_value = current_time.replace(minute=0, second=0, microsecond=0)
    else:
        # timeframe to consider 45-00
        # Minutes 0-9: replace() keeps the *current* hour at :45; the one-hour
        # shift below then lands this on the previous hour's :45-:00 slot.
        start_time_value = current_time.replace(minute=45, second=0, microsecond=0)
    # Shift the selected slot back one hour before converting to epoch ms.
    start_time_value = start_time_value - timedelta(hours=1, minutes=0)
    start_time = start_time_value
    end_time = start_time + timedelta(hours=0, minutes=14, seconds=59)
    start_time = start_time.timestamp() * 1000
    end_time = end_time.timestamp() * 1000
    return start_time, end_time
def recommend_best_parameters():
    """Run one end-to-end recommendation cycle.

    Pulls the most recent data window from KairosDB, runs the parameter
    optimizer, remaps the optimizer's parameter names to output tag ids via
    ``OUTPUT_PARAMETER_QUICK_MAP_INFO``, and — only when the optimizer sets
    its success flag — publishes one recommendation message and two
    prediction messages (at +15 and +30 minutes) to Kafka.
    """
    logger.info("Started Recommending best Parameters")
    start_time, end_time = get_start_time_and_end_time()
    # Offsets (ms) applied after the window end to timestamp the two
    # prediction points: +15 min ("start") and +30 min ("end").
    prediction_time_difference = 15 * 60 * 1000
    prediction_time_difference_end = 1000 * 60 * 30
    absolute_time = {"start_absolute": start_time, "end_absolute": end_time}
    logger.debug("Fetching most recent live data")
    # "live_query" selects the live-data query payload defined in data_conf.
    live_data_puller = DataPuller(db_host=KAIROS_DB_HOST,
                                  data_config=data_conf,
                                  payload="live_query",
                                  absolute_time=absolute_time)
    df_input = live_data_puller.get_data()
    data_dict = df_input.to_dict(orient="records")
    logger.debug("Input Data Received")
    optimizer = ParameterOptimization()
    recommendation_flag, recommendation_result, predicted_result = optimizer.find_optimum(data_dict=data_dict,
                                                                                          controllable=True,
                                                                                          add_predict=TARGET_VALUE)
    logger.debug("Obtained optimized result from Optimizer")
    logger.debug("Output Recommendation : {}".format(simplejson.dumps(recommendation_result, allow_nan=False)))
    logger.debug("Output Prediction : {}".format(simplejson.dumps(predicted_result, allow_nan=False)))
    logger.debug("Tagging the data appropriately to be shared to Kafka/DataProcessor")
    # Keep only the parameters that have a known output tag id, keyed by that
    # tag id instead of the human-readable parameter name.
    tagged_recommendation_result = dict()
    tagged_predicted_result = dict()
    for each_parameter in OUTPUT_PARAMETER_QUICK_MAP_INFO:
        if each_parameter in recommendation_result:
            tagged_recommendation_result[OUTPUT_PARAMETER_QUICK_MAP_INFO[each_parameter]] = recommendation_result[
                each_parameter]
        if each_parameter in predicted_result:
            tagged_predicted_result[OUTPUT_PARAMETER_QUICK_MAP_INFO[each_parameter]] = predicted_result[each_parameter]
    print(tagged_recommendation_result)
    print(tagged_predicted_result)
    # Recommendation is stamped 1 ms after the data window ends.
    recommendation_time = end_time + 1
    prediction_time_start = recommendation_time + prediction_time_difference
    prediction_time_end = recommendation_time + prediction_time_difference_end
    recommendation_output = KafkaDataModel(data=tagged_recommendation_result,
                                           site_id=OUTPUT_SITE_ID,
                                           timestamp=int(recommendation_time))
    logger.info("Recommendation Output : {}".format(simplejson.dumps(recommendation_output.dict(), allow_nan=False)))
    # Both prediction messages carry the same values; only timestamps differ.
    predicted_output_start = KafkaDataModel(data=tagged_predicted_result,
                                            site_id=OUTPUT_SITE_ID,
                                            timestamp=int(prediction_time_start))
    predicted_output_end = KafkaDataModel(data=tagged_predicted_result,
                                          site_id=OUTPUT_SITE_ID,
                                          timestamp=int(prediction_time_end))
    logger.info("Prediction Output Start : {}".format(simplejson.dumps(predicted_output_start.dict(), allow_nan=False)))
    logger.info("Prediction Output End : {}".format(simplejson.dumps(predicted_output_end.dict(), allow_nan=False)))
    kafka_writer = KafkaProducerUtil()
    if recommendation_flag:
        # Publish only when the optimizer reported a usable recommendation.
        kafka_writer.publish(topic=KAFKA_TOPIC, data=simplejson.dumps(recommendation_output.dict(), allow_nan=False))
        kafka_writer.publish(topic=KAFKA_TOPIC, data=simplejson.dumps(predicted_output_start.dict(), allow_nan=False))
        kafka_writer.publish(topic=KAFKA_TOPIC, data=simplejson.dumps(predicted_output_end.dict(), allow_nan=False))
        logger.info("Outputs written to Kafka successfully!")
    else:
        logger.info("Outputs not written as recommendations flag was false!")


if __name__ == '__main__':
    recommend_best_parameters()
[KAIROS_DB]
uri=$KAIROS_URI
[KAFKA]
kafka_host=$KAFKA_HOST
kafka_port=$KAFKA_PORT
kafka_topic=$KAFKA_TOPIC
[POSTGRES]
postgres_uri=$POSTGRES_URI
\ No newline at end of file
column_renamer:
site_114$dept_129$line_326$equipment_3903$tag_4831: Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 5
site_114$dept_129$line_326$equipment_3903$tag_4648: Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow CV
site_114$dept_129$line_326$equipment_3903$tag_4692: Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Pressure 2
site_114$dept_129$line_326$equipment_3903$tag_4703: Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 1
site_114$dept_129$line_326$equipment_3903$tag_4704: Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 2
site_114$dept_129$line_326$equipment_3903$tag_4705: Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 3
site_114$dept_129$line_326$equipment_3903$tag_4828: Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 2
site_114$dept_129$line_326$equipment_3903$tag_4829: Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 3
site_114$dept_129$line_326$equipment_3903$tag_4830: Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 4
site_114$dept_129$line_326$equipment_3903$tag_4862: Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Chlorine
site_114$dept_129$line_326$equipment_3903$tag_4688: Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Running status
site_114$dept_129$line_326$equipment_3903$tag_4647: Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature CV
site_114$dept_129$line_326$equipment_3903$tag_5153: Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Ratio R7204
site_114$dept_129$line_326$equipment_3903$tag_4570: Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature
train_query:
metrics:
- tags:
c3:
- site_114$dept_129$line_326$equipment_3903$tag_4831
- site_114$dept_129$line_326$equipment_3903$tag_4648
- site_114$dept_129$line_326$equipment_3903$tag_4692
- site_114$dept_129$line_326$equipment_3903$tag_4703
- site_114$dept_129$line_326$equipment_3903$tag_4704
- site_114$dept_129$line_326$equipment_3903$tag_4705
- site_114$dept_129$line_326$equipment_3903$tag_4828
- site_114$dept_129$line_326$equipment_3903$tag_4829
- site_114$dept_129$line_326$equipment_3903$tag_4830
- site_114$dept_129$line_326$equipment_3903$tag_4862
- site_114$dept_129$line_326$equipment_3903$tag_4647
- site_114$dept_129$line_326$equipment_3903$tag_5153
- site_114$dept_129$line_326$equipment_3903$tag_4570
name: ilens.live_data.raw
group_by:
- name: tag
tags:
- c3
aggregators:
- name: avg
sampling:
value: '10'
unit: minutes
align_sampling: true
align_start_time: true
- tags:
c3:
- site_114$dept_129$line_326$equipment_3903$tag_4688
name: ilens.live_data.raw
group_by:
- name: tag
tags:
- c3
aggregators:
- name: min
sampling:
value: '1'
unit: hours
align_sampling: true
align_start_time: true
plugins: [ ]
cache_time: 0
time_zone: Asia/Calcutta
start_relative:
value: 6
unit: months
live_query:
metrics:
- tags:
c3:
- site_114$dept_129$line_326$equipment_3903$tag_4831
- site_114$dept_129$line_326$equipment_3903$tag_4648
- site_114$dept_129$line_326$equipment_3903$tag_4692
- site_114$dept_129$line_326$equipment_3903$tag_4703
- site_114$dept_129$line_326$equipment_3903$tag_4704
- site_114$dept_129$line_326$equipment_3903$tag_4705
- site_114$dept_129$line_326$equipment_3903$tag_4828
- site_114$dept_129$line_326$equipment_3903$tag_4829
- site_114$dept_129$line_326$equipment_3903$tag_4830
- site_114$dept_129$line_326$equipment_3903$tag_4862
- site_114$dept_129$line_326$equipment_3903$tag_4647
- site_114$dept_129$line_326$equipment_3903$tag_5153
- site_114$dept_129$line_326$equipment_3903$tag_4570
name: ilens.live_data.raw
group_by:
- name: tag
tags:
- c3
aggregators:
- name: avg
sampling:
value: '15'
unit: minutes
align_sampling: true
align_start_time: true
- tags:
c3:
- site_114$dept_129$line_326$equipment_3903$tag_4688
name: ilens.live_data.raw
group_by:
- name: tag
tags:
- c3
aggregators:
- name: min
sampling:
value: '15'
unit: minutes
align_sampling: true
align_start_time: true
plugins: [ ]
cache_time: 0
time_zone: Asia/Calcutta
start_relative:
value: 6
unit: months
parameter:
controllable_params:
- 'Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Pressure 2'
- 'Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 1'
- 'Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 2'
- 'Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 3'
- 'Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 4'
- 'Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 5'
- 'Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Chlorine'
- 'Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Ratio R7204'
non_controllable_params:
feed_input:
# - 'Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 1'
model:
type: "mlflow.sklearn"
path: "./model/ctcmp_v1"
model_input_features:
- 'Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Pressure 2'
- 'Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 1'
- 'Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 2'
- 'Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 3'
- 'Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 4'
- 'Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 5'
- 'Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Chlorine'
- 'Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Ratio R7204'
output:
quick_map:
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 2_lower: site_114$dept_129$line_326$equipment_3903$tag_6068
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 2_upper: site_114$dept_129$line_326$equipment_3903$tag_6069
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 2_original: site_114$dept_129$line_326$equipment_3903$tag_6070
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 4_lower: site_114$dept_129$line_326$equipment_3903$tag_6071
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 4_upper: site_114$dept_129$line_326$equipment_3903$tag_6072
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 4_original: site_114$dept_129$line_326$equipment_3903$tag_6073
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 1_lower: site_114$dept_129$line_326$equipment_3903$tag_6114
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Pressure 2_lower: site_114$dept_129$line_326$equipment_3903$tag_6115
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Pressure 2_upper: site_114$dept_129$line_326$equipment_3903$tag_6116
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Pressure 2_original: site_114$dept_129$line_326$equipment_3903$tag_6117
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 1_upper: site_114$dept_129$line_326$equipment_3903$tag_6118
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 1_original: site_114$dept_129$line_326$equipment_3903$tag_6119
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 3_lower: site_114$dept_129$line_326$equipment_3903$tag_6120
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 3_upper: site_114$dept_129$line_326$equipment_3903$tag_6121
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 3_original: site_114$dept_129$line_326$equipment_3903$tag_6122
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 5_lower: site_114$dept_129$line_326$equipment_3903$tag_6123
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 5_upper: site_114$dept_129$line_326$equipment_3903$tag_6124
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 5_original: site_114$dept_129$line_326$equipment_3903$tag_6125
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Ratio R7204_lower: site_114$dept_129$line_326$equipment_3903$tag_6126
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Ratio R7204_upper: site_114$dept_129$line_326$equipment_3903$tag_6127
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Ratio R7204_original: site_114$dept_129$line_326$equipment_3903$tag_6128
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Chlorine_lower: site_114$dept_129$line_326$equipment_3903$tag_6133
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Chlorine_upper: site_114$dept_129$line_326$equipment_3903$tag_6134
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Chlorine_original: site_114$dept_129$line_326$equipment_3903$tag_6135
ai_yield_pure CTCMP_predicted: site_114$dept_129$line_326$equipment_3903$tag_6081
ai_yield_pure CTCMP_live: site_114$dept_129$line_326$equipment_3903$tag_6082
ai_yield_pure CTCMP_delta: site_114$dept_129$line_326$equipment_3903$tag_6083
site: "site_114"
target:
tables:
profile: "gba_ctcmp_profile"
lookup: "gba_ctcmp_lookup"
device_tag_map:
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Pressure 2: PI_R7204_2
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 1: TI_R7204_3
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 2: TIC_R7204_1
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 3: FI_R7204_2
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 4: FI_R7204_4
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 5: FI_R7204_5
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Chlorine: AI_R7204
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Ratio R7204: RATIO
device_tag,column,importances,min,max,mean,std_dev,range_min,range_max,golden_batch_minimum,golden_batch_maximum,operating_min,operating_max,module,model_version
PI_R7204_2,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Pressure 2,19.2,0,1,0.4,0.1,0.3,0.5,0,1,0,1,Golden Batch - CTCMP,latest
TI_R7204_3,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 1,12.1,139,204,170.7,9.4,156.6,184.8,156,185,156,185,Golden Batch - CTCMP,latest
TIC_R7204_1,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 2,3.3,171,194,181.3,4.0,175.3,187.3,175,188,175,188,Golden Batch - CTCMP,latest
FI_R7204_2,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 3,8.7,4,368,198.1,67.8,96.4,299.8,96,300,96,300,Golden Batch - CTCMP,latest
FI_R7204_4,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 4,25.3,50,129,92.1,12.2,73.8,110.3,73,111,73,111,Golden Batch - CTCMP,latest
FI_R7204_5,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 5,8.6,17,241,140.4,40.3,79.9,200.9,79,201,79,201,Golden Batch - CTCMP,latest
AI_R7204,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Chlorine,20.6,-1,0,-0.9,0.0,-1.0,-0.9,-1,0,-1,0,Golden Batch - CTCMP,latest
RATIO,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Ratio R7204,2.2,-31,9,3.6,1.0,2.1,5.2,2,6,2,6,Golden Batch - CTCMP,latest
KAIROS_URI=https://iLens:iLensJUB$456@jubilant.ilens.io/kairos
KAFKA_HOST=192.168.0.220
KAFKA_PORT=9092
KAFKA_TOPIC=ilens_dev
POSTGRES_URI=postgresql://iLens:iLensJUB$456@jubilant.ilens.io/kairos
\ No newline at end of file
Tag ID,Tag Name,Site,Plant,Line,Equipment,Parameter Name
site_114$dept_129$line_326$equipment_3903$tag_4831,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 5,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Flow 5
site_114$dept_129$line_326$equipment_3903$tag_4648,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow CV,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Flow CV
site_114$dept_129$line_326$equipment_3903$tag_4692,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Pressure 2,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Pressure 2
site_114$dept_129$line_326$equipment_3903$tag_4703,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 1,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Temperature 1
site_114$dept_129$line_326$equipment_3903$tag_4704,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 2,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Temperature 2
site_114$dept_129$line_326$equipment_3903$tag_4705,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 3,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Temperature 3
site_114$dept_129$line_326$equipment_3903$tag_4828,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 2,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Flow 2
site_114$dept_129$line_326$equipment_3903$tag_4829,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 3,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Flow 3
site_114$dept_129$line_326$equipment_3903$tag_4830,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 4,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Flow 4
site_114$dept_129$line_326$equipment_3903$tag_4862,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Chlorine,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Chlorine
site_114$dept_129$line_326$equipment_3903$tag_4688,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Running status,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Running status
site_114$dept_129$line_326$equipment_3903$tag_4647,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature CV,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Temperature CV
site_114$dept_129$line_326$equipment_3903$tag_5153,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Ratio R7204,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Ratio R7204
site_114$dept_129$line_326$equipment_3903$tag_4570,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature,Bharuch-Unit 2,CTCMP,Stage IIA - Chlorination & Neutralization-I,Reactor R-7204,Temperature
parameters,raw_parameter,lower_parameter,upper_parameter,original_parameter
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Pressure 2,Pressure 2,ai_yield_Pressure 2_lower,ai_yield_Pressure 2_upper,ai_yield_Pressure 2_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 1,Temperature 1,ai_yield_Temperature 1_lower,ai_yield_Temperature 1_upper,ai_yield_Temperature 1_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 2,Temperature 2,ai_yield_Temperature 2_lower,ai_yield_Temperature 2_upper,ai_yield_Temperature 2_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 3,Flow 3,ai_yield_Flow 3_lower,ai_yield_Flow 3_upper,ai_yield_Flow 3_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 4,Flow 4,ai_yield_Flow 4_lower,ai_yield_Flow 4_upper,ai_yield_Flow 4_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 5,Flow 5,ai_yield_Flow 5_lower,ai_yield_Flow 5_upper,ai_yield_Flow 5_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Chlorine,Chlorine,ai_yield_Chlorine_lower,ai_yield_Chlorine_upper,ai_yield_Chlorine_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Ratio R7204,Ratio R7204,ai_yield_Ratio R7204_lower,ai_yield_Ratio R7204_upper,ai_yield_Ratio R7204_original
This source diff could not be displayed because it is too large. You can view the blob instead.
APP_NAME=ebpr_report_engine
MONGO_URI=mongodb://192.168.0.220:2717/
KAIROS_URI=http://192.168.0.220:8080
MQTT_URL=192.168.0.220
MQTT_PORT=1883
SCHEDULER_PROXY=http://192.168.0.220/ilens-scheduler
ILENS_META_SERVICE_URL=http://192.168.0.220/ilens_api
EBPR_DATA_ENGINE_PROXY=http://192.168.0.220/formde
EBPR_PROXY=http://192.168.0.220/ebpr
WORKFLOW_MGMT_PROXY=http://jubilant.ilens.io/workflow-mt
REDIS_HOST=192.168.0.220
REDIS_PORT=6379
REDIS_BROKER_URI_CELERY=redis://192.168.0.220:6379/10
SECURITY_IP_CHECK=false
SECURITY_USER_CHECK=true
SECURITY_AGENT_CHECK=true
LOG_LEVEL=INFO
LOG_TRACEBACK=true
REPORT_DIRECTORY=reports
BASE_DIRECTORY=data
EBPR_REPORT_PROXY=http://192.168.0.220/ebpr_reports
ENABLE_CELERY_WORKER=true
HUB_NAME=
CONNECTION_STRING=
API_VERSION=
MOBILE_PUSH_NOTIFICATION=false
KAFKA_HOST=192.168.0.220
KAFKA_PORT=9092
KAFKA_TOPIC=ilens_dev
START_DATE=1627776020
END_DATE=1628035220
\ No newline at end of file
device_tag,column,importances,min,max,mean,std_dev,range_min,range_max,golden_batch_minimum,golden_batch_maximum,operating_min,operating_max,module,model_version,rec_lower,rec_upper,rec_original,rec_status
PI_R7204_2,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Pressure 2,19.2,0,1,0.4,0.1,0.3,0.5,0,1,0,1,Golden Batch - CTCMP,latest,0,1,0.43,Operator to make changes based on recommendations!
TI_R7204_3,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 1,12.1,139,204,170.7,9.4,156.6,184.8,156,185,156,185,Golden Batch - CTCMP,latest,165,168,166.54,Operator to make changes based on recommendations!
TIC_R7204_1,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 2,3.3,171,194,181.3,4.0,175.3,187.3,175,188,175,188,Golden Batch - CTCMP,latest,186,188,186.81,Operator to make changes based on recommendations!
FI_R7204_2,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 3,8.7,4,368,198.1,67.8,96.4,299.8,96,300,96,300,Golden Batch - CTCMP,latest,96,127,104.17,Operator to make changes based on recommendations!
FI_R7204_4,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 4,25.3,50,129,92.1,12.2,73.8,110.3,73,111,73,111,Golden Batch - CTCMP,latest,87,92,89.45,Operator to make changes based on recommendations!
FI_R7204_5,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 5,8.6,17,241,140.4,40.3,79.9,200.9,79,201,79,201,Golden Batch - CTCMP,latest,171,201,199.57,Operator to make changes based on recommendations!
AI_R7204,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Chlorine,20.6,-1,0,-0.9,0.0,-1.0,-0.9,-1,0,-1,0,Golden Batch - CTCMP,latest,-1,0,-0.33,Operator to make changes based on recommendations!
RATIO,Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Ratio R7204,2.2,-31,9,3.6,1.0,2.1,5.2,2,6,2,6,Golden Batch - CTCMP,latest,2,4,3.25,Operator to make changes based on recommendations!
flavors:
python_function:
env: conda.yaml
loader_module: mlflow.sklearn
model_path: model.pkl
python_version: 3.8.12
sklearn:
pickled_model: model.pkl
serialization_format: cloudpickle
sklearn_version: 1.0.2
utc_time_created: '2022-03-31 08:38:33.992936'
channels:
- conda-forge
dependencies:
- python=3.8.12
- pip
- pip:
- mlflow
- cloudpickle==2.0.0
- psutil==5.9.0
- scikit-learn==1.0.2
- typing-extensions==4.1.1
name: mlflow-env
mlflow
cloudpickle==2.0.0
psutil==5.9.0
scikit-learn==1.0.2
typing-extensions==4.1.1
\ No newline at end of file
columns,importances
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 4,25.34742183515991
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Chlorine,20.56310571518254
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Pressure 2,19.191097470789952
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 1,12.090429856362187
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 3,8.715560671364255
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 5,8.626076494006318
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 2,3.259925613054504
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Ratio R7204,2.20638234408034
parameters,raw_parameter,lower_parameter,upper_parameter,original_parameter
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Pressure 2,Pressure 2,ai_yield_Pressure 2_lower,ai_yield_Pressure 2_upper,ai_yield_Pressure 2_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 1,Temperature 1,ai_yield_Temperature 1_lower,ai_yield_Temperature 1_upper,ai_yield_Temperature 1_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Temperature 2,Temperature 2,ai_yield_Temperature 2_lower,ai_yield_Temperature 2_upper,ai_yield_Temperature 2_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 3,Flow 3,ai_yield_Flow 3_lower,ai_yield_Flow 3_upper,ai_yield_Flow 3_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 4,Flow 4,ai_yield_Flow 4_lower,ai_yield_Flow 4_upper,ai_yield_Flow 4_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Flow 5,Flow 5,ai_yield_Flow 5_lower,ai_yield_Flow 5_upper,ai_yield_Flow 5_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Chlorine,Chlorine,ai_yield_Chlorine_lower,ai_yield_Chlorine_upper,ai_yield_Chlorine_original
Bharuch-Unit 2.CTCMP.Stage IIA - Chlorination & Neutralization-I.Reactor R-7204.Ratio R7204,Ratio R7204,ai_yield_Ratio R7204_lower,ai_yield_Ratio R7204_upper,ai_yield_Ratio R7204_original
pytz==2021.3
loguru==0.5.3
scipy==1.7.1
numpy==1.21.2
pandas==1.3.3
mlflow==1.20.2
scikit-learn
simplejson==3.17.5
requests==2.26.0
pydantic==1.8.2
python-dotenv==0.19.2
PyYAML==6.0
kafka-python==1.4.7
SQLAlchemy==1.3.20
sqlparse==0.4.2
psycopg2==2.9.1
\ No newline at end of file
from loguru import logger
from scripts.core.data.data_import import DataPuller
from scripts.constants.app_configuration import KAIROS_DB_HOST, data_conf, training_data
def training_data_generator():
    """Pull the training dataset from Kairos and persist it to the training CSV."""
    logger.info("Fetching the training data!")
    puller = DataPuller(db_host=KAIROS_DB_HOST, data_config=data_conf, payload="train_query")
    logger.debug("Pulling the training data from Kairos")
    frame = puller.get_data()
    logger.debug("Attempting to save the loaded data!")
    # index=False keeps the CSV free of the pandas row index.
    frame.to_csv(training_data, index=False)
    logger.info("Training Data has been saved successfully!")


if __name__ == '__main__':
    # Create the training data!
    training_data_generator()
from dotenv import load_dotenv
import os
import sys
from configparser import ConfigParser, BasicInterpolation
import yaml
# Configuration File Constants
# Paths are relative to the process working directory (service launched from repo root).
_application_conf = f"./conf/application.conf"  # INI-style application settings
_default_conf = f"./config.env"  # dotenv file with environment overrides
data_conf = f"./conf/data.yml"  # Kairos query / parameter mapping definitions
training_data = f"./data/training.csv"  # destination of the generated training dataset
# Populate os.environ from config.env BEFORE the env-expanding interpolation below runs.
load_dotenv(dotenv_path=_default_conf)
class EnvInterpolation(BasicInterpolation):
    """ConfigParser interpolation that expands environment variables in option values."""

    def before_get(self, parser, section, option, value, defaults):
        # Let BasicInterpolation resolve %(...)s references first.
        value = super().before_get(parser, section, option, value, defaults)
        expanded = os.path.expandvars(value)
        # expandvars leaves an unresolved variable as a literal "$NAME";
        # surface that case as None rather than returning the raw placeholder.
        if expanded.startswith("$"):
            return
        return expanded
# Load the INI configuration, expanding environment variables in every value.
# Any failure here is fatal: the service cannot run without its configuration.
try:
    config = ConfigParser(interpolation=EnvInterpolation())
    config.read(_application_conf)
except Exception as e:
    print(f"Error while loading the config: {e}")
    print("Failed to Load Configuration. Exiting!!!")
    sys.exit()
class Logging:
    """Logging settings resolved from the [LOGGING] section of application.conf."""

    # Log level name; falls back to INFO when the option is missing or empty
    # (EnvInterpolation may yield None for unresolved variables).
    level = config.get("LOGGING", "level", fallback="INFO") or "INFO"
    # Whether tracebacks accompany error logs; defaults to True.
    tb_flag = config.getboolean("LOGGING", "traceback", fallback=True)
    tb_flag = True if tb_flag is None else tb_flag
# Configuration Variables
# Kairos Configuration Variables
KAIROS_DB_HOST = config["KAIROS_DB"]["uri"]  # base URL of the KairosDB REST endpoint
# Postgres Configuration Variables
POSTGRES_URI = config["POSTGRES"]["postgres_uri"]  # SQLAlchemy-style connection URI
# Kafka Configuration Variables
KAFKA_HOST = config["KAFKA"]["kafka_host"]
KAFKA_PORT = config["KAFKA"]["kafka_port"]
KAFKA_TOPIC = config["KAFKA"]["kafka_topic"]
# Read the configuration file
# data.yml drives the model/parameter/output wiring below.
with open(data_conf, "r") as _cf:
    _config = yaml.full_load(_cf)
MODEL = _config["model"]  # model loader descriptor (type, path, input features)
PARAMETER_INFO = _config["parameter"]  # controllable / non-controllable parameter groups
PARAMETER_INPUT = MODEL["model_input_features"]  # ordered feature columns fed to the model
OUTPUT_DATA = _config["output"]  # output tables, quick-map and target definitions
OUTPUT_PARAMETER_QUICK_MAP_INFO = OUTPUT_DATA["quick_map"]
TARGET_VALUE = OUTPUT_DATA["target"]
OUTPUT_SITE_ID = OUTPUT_DATA["site"]
from json import dumps
from kafka import KafkaProducer
from loguru import logger
from scripts.constants.app_configuration import KAFKA_HOST, KAFKA_PORT, KAFKA_TOPIC
class KafkaProducerUtil:
    """Thin wrapper around kafka-python's KafkaProducer for publishing string payloads."""

    def __init__(self):
        # Broker coordinates come from application configuration.
        try:
            self.host = KAFKA_HOST
            self.port = KAFKA_PORT
            logger.debug(f"Connecting to Kafka with details: {self.host}, {self.port}")
            brokers = ["{}:{}".format(self.host, str(self.port))]
            # Serializer expects str payloads (callers must pre-serialize dicts).
            self.producer = KafkaProducer(
                bootstrap_servers=brokers,
                value_serializer=lambda v: v.encode('utf-8'),
                api_version=(0, 10, 1))
            self.producer.flush()
        except Exception as e:
            # NOTE(review): on failure self.producer stays unset; publish() then
            # fails inside its own try/except and returns False.
            logger.error(f"Kafka connection error: {e}")

    def publish(self, topic, data):
        """Send one message to topic; returns True on success, False on any failure."""
        try:
            kafka_response = self.producer.send(topic, data)
            self.producer.flush()
            logger.debug(f" Message sent to kafka with response: {kafka_response}")
            return True
        except Exception as e:
            logger.error(e)
            return False
import json
import pandas as pd
import requests
import yaml
from loguru import logger
class DataPuller(object):
    """Pulls time-series data for configured tags from a KairosDB instance.

    :param db_host: base URL of the KairosDB host (no trailing slash).
    :param data_config: path to the YAML file holding the query payloads.
    :param payload: key inside the YAML config naming the query to run.
    :param absolute_time: optional dict with "start_absolute"/"end_absolute"
        (epoch ms); when given, relative time keys in the payload are replaced.
    :param optional_payload: unused; kept for backward compatibility.
    """

    def __init__(self, db_host, data_config, payload, absolute_time=None, optional_payload=None):
        with open(data_config, 'r') as _cf:
            self.conf = yaml.full_load(_cf)
        self.db_host_url = db_host
        self.request_url = "{kairos_host}/api/v1/datapoints/query".format(kairos_host=self.db_host_url)
        self.payload = self.conf[payload]
        self.column_rename = self.conf["column_renamer"]
        if absolute_time is not None:
            # Absolute windows win over relative ones: drop any relative keys first.
            self.payload.pop("start_relative", None)
            self.payload.pop("end_relative", None)
            self.payload["start_absolute"] = absolute_time["start_absolute"]
            self.payload["end_absolute"] = absolute_time["end_absolute"]
        self.payload = json.dumps(self.payload)

    def get_data(self):
        """Query KairosDB and return a wide DataFrame: one "timestamp" column
        (converted to Asia/Kolkata), an "epochtime" column with the raw epoch ms,
        and one column per tag (renamed via column_renamer when a mapping exists).

        NOTE(review): an empty query result leaves df_final empty and the
        trailing column assignments raise KeyError -- same as the original behavior.
        """
        logger.info("Data for the parameters being pulled from Kairos Database")
        response_data = requests.post(url=self.request_url, data=self.payload).json()
        output_data = response_data["queries"]
        logger.debug("Data pull complete")
        df_final = pd.DataFrame()
        for query_result in output_data:
            for each_grouped_data in query_result["results"]:
                value = (each_grouped_data["values"])
                # "c3" group carries the tag id used for column naming (per data.yml group_by).
                tag_id = each_grouped_data["group_by"][0]["group"]["c3"]
                try:
                    column_name = self.column_rename[tag_id]
                    logger.debug("Renamed {} to {} in Data".format(tag_id, column_name))
                except KeyError:
                    logger.debug("Column Renaming Logic not found for {}".format(tag_id))
                    column_name = tag_id
                df_column_data = pd.DataFrame(data=value, columns=["timestamp", column_name])
                if df_final.empty:
                    df_final = df_column_data
                else:
                    # Outer merge keeps every timestamp seen in any tag's series.
                    df_final = df_final.merge(df_column_data, how="outer",
                                              left_on="timestamp", right_on="timestamp")
        df_final["epochtime"] = df_final["timestamp"]
        df_final["timestamp"] = pd.to_datetime(df_final['timestamp'], unit="ms").dt.tz_localize('UTC').dt.tz_convert(
            'Asia/Kolkata')
        logger.debug("Final number of columns : {}".format(str(len(list(df_final.columns)))))
        return df_final
from loguru import logger
from scripts.core.data.data_import import DataPuller
from scripts.constants.app_configuration import KAIROS_DB_HOST, data_conf, training_data
def training_data_generator():
    """Fetch the training dataset from Kairos and write it to the training CSV."""
    logger.info("Fetching the training data!")
    data_source = DataPuller(db_host=KAIROS_DB_HOST, data_config=data_conf, payload="train_query")
    logger.debug("Pulling the training data from Kairos")
    dataset = data_source.get_data()
    logger.debug("Attempting to save the loaded data!")
    dataset.to_csv(training_data, index=False)
    logger.info("Training Data has been saved successfully!")


# if __name__ == '__main__':
#     # Create the training data!
#     training_data_generator()
import mlflow
import mlflow.sklearn
from loguru import logger
class ModelLoader(object):
    """Loads a serialized model based on a model_info descriptor.

    model_info is a mapping with at least:
      - "type": loader identifier (only "mlflow.sklearn" is supported)
      - "path": model location understood by the mlflow loader
    """

    def __init__(self, model_info):
        self.model_info = model_info

    def load_model(self):
        """Return the loaded model, or None when the type is unsupported."""
        logger.info("Loading the Model")
        if self.model_info["type"] == "mlflow.sklearn":
            return self._load_mlflow_sklearn_model()
        else:
            logger.info("Unsupported Model Type")

    def _load_mlflow_sklearn_model(self):
        """Load an mlflow-packaged sklearn model; returns None on failure."""
        try:
            # Log (not print) the path so it lands in the service logs.
            logger.debug("Loading mlflow.sklearn model from path: {}".format(self.model_info["path"]))
            _model = mlflow.sklearn.load_model(self.model_info["path"])
            logger.debug("Model loaded successfully!")
            return _model
        except Exception as e:
            logger.error("Error while loading mlflow.sklearn model : {}".format(str(e)))
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
import json
import math
import numpy as np
import pandas as pd
from loguru import logger
from scipy.optimize import differential_evolution
from scripts.constants.app_configuration import PARAMETER_INFO, PARAMETER_INPUT, POSTGRES_URI, MODEL, \
KAIROS_DB_HOST, data_conf, TARGET_VALUE, OUTPUT_DATA
from scripts.core.data.data_import import DataPuller
from scripts.core.model_factory.model_loader import ModelLoader
class ParameterOptimization(object):
    """Generates operating-parameter recommendations for the reactor model.

    Uses a trained regression model (loaded via ModelLoader) together with
    operating bounds from conf/profile.csv, and searches with differential
    evolution for the parameter set that maximizes the model's prediction.
    """

    def __init__(self, parameter_info=PARAMETER_INFO):
        # Bounds dict {column: {"min", "max"}}; populated later (see find_optimum).
        self.original_bound = None
        self.parameter_info = parameter_info
        # Model is loaded once per optimizer instance.
        self.model = ModelLoader(model_info=MODEL).load_model()

    @staticmethod
    def calculate_current_bounds():
        """Read conf/profile.csv and return (profile_df, {column: {"min", "max"}})."""
        data = "conf/profile.csv"
        df_profile = pd.read_csv(data)
        data_designated_profile = df_profile.to_dict(orient="records")
        bounds_data = dict()
        for each_record in data_designated_profile:
            bounds_data[each_record["column"]] = {
                "min": each_record["operating_min"],
                "max": each_record["operating_max"]
            }
        return df_profile, bounds_data

    def calculate_bounds(self, live_data, feed_input_columns):
        """
        :param live_data: dictionary of a single row of dataframe orient="records"
        :param feed_input_columns: list of columns of the type Feed Drum Inputs
        :return: list of (min, max) int tuples ordered as PARAMETER_INPUT,
            or None when an exception occurs (logged, not raised)
        """
        logger.info("Calculating Bounds for the data")
        try:
            temporary_bound_data = self.original_bound.copy()
            # Non-controllable parameters are pinned to their live value.
            if self.parameter_info["non_controllable_params"] is not None:
                for each_parameter in self.parameter_info["non_controllable_params"]:
                    temporary_bound_data[each_parameter] = {"min": int(live_data[each_parameter]),
                                                            "max": int(live_data[each_parameter])}
            # Feed inputs may move only +/- 1 around their live value.
            if feed_input_columns:
                for each_parameter in feed_input_columns:
                    temporary_bound_data[each_parameter] = {"min": int(live_data[each_parameter]) - 1,
                                                            "max": int(live_data[each_parameter]) + 1}
            formatted_bound_data = dict()
            for each_key in PARAMETER_INPUT:
                formatted_bound_data[each_key] = (
                    int(temporary_bound_data[each_key]["min"]), int(temporary_bound_data[each_key]["max"]))
            print("Formatted Bound Data", formatted_bound_data)
            return list(formatted_bound_data.values())
        except Exception as e:
            logger.error("Unable to calculate bounds for the data : {}".format(str(e)))

    def verify_operational_bounds(self, data_dict):
        """Check a recommendation dict against the operating bounds.

        Returns False when any *_original value falls outside its bound.
        Lower/upper values are clamped into a local copy only -- the clamped
        copy is printed but not returned (same as original behavior).
        """
        temporary_bound_data = self.original_bound.copy()
        temporary_recommendation_data = data_dict.copy()
        operational_bound_flag = True
        for each_key in PARAMETER_INPUT:
            parameter = each_key
            parameter_lower = "{}_lower".format(parameter)
            parameter_upper = "{}_upper".format(parameter)
            parameter_original = "{}_original".format(parameter)
            if (data_dict[parameter_original] < temporary_bound_data[parameter]["min"]) or (
                    data_dict[parameter_original] > temporary_bound_data[parameter]["max"]):
                operational_bound_flag = False
            if data_dict[parameter_lower] < temporary_bound_data[parameter]["min"]:
                temporary_recommendation_data[parameter_lower] = temporary_bound_data[parameter]["min"]
            if data_dict[parameter_upper] > temporary_bound_data[parameter]["max"]:
                temporary_recommendation_data[parameter_upper] = temporary_bound_data[parameter]["max"]
        print("operational_bound_flag --> ", operational_bound_flag)
        print("temporary_recommendation_data --> ", temporary_recommendation_data)
        return operational_bound_flag

    def _objective_fun(self, x):
        # Negate the prediction so the minimizer effectively maximizes the target.
        temp_df = pd.DataFrame(columns=PARAMETER_INPUT, data=[x])
        return 0 - self.model.predict(temp_df)[0]

    def get_recommendation(self, live_processed_data_dict, bounds):
        """Run differential evolution within bounds; returns the optimal vector.

        live_processed_data_dict is accepted but not used by the search itself.
        seed=42 makes the search deterministic for identical bounds.
        """
        rs = differential_evolution(
            self._objective_fun,
            bounds=bounds,
            seed=42,
            workers=1)
        return rs.x

    @staticmethod
    def find_beta_recommendation(at_value, au_value, am_value, an_value, ao_value):
        """Compute (RPT_Beta, RPT_Norm, Standard_Norm) from totalizer values.

        Pulls the latest composition tags from Kairos, then applies the plant's
        spreadsheet formulas; the single-letter keys (T/U/Y/Z/AT/...) mirror
        that sheet's column letters.
        """
        column_renamer_dict = {
            "Surge Tank Pyridine": "T",
            "Surge Tank Beta": "U",
            "AMM Rec Btm Pyr": "Y",
            "Amm Rec Btm Beta": "Z"
        }
        data = DataPuller(db_host=KAIROS_DB_HOST, data_config=data_conf, payload="beta_calculation_query").get_data()
        data = data.rename(columns=column_renamer_dict, inplace=False)
        data = data[["T", "U", "Y", "Z"]]
        data = data.to_dict(orient="records")[0]
        data["AT"] = at_value  # "Totalizers.FQ-5126",
        data["AU"] = au_value  # Totalizers.FQ-5263A
        data["AM"] = am_value  # FQ-5102 - FQ-6102
        data["AN"] = an_value  # FQ-5111 - FQ-6100
        data["AO"] = ao_value  # FQ-5161 - FQ-6104
        data["BG"] = data["AT"] * data["T"]
        data["BH"] = data["AT"] * data["U"]
        data["BM"] = data["AU"] * data["Y"]
        data["BN"] = data["AU"] * data["Z"]
        common_denominator = data["BG"] + data["BH"] + data["BM"] + data["BN"]
        data["RPT_Pyr"] = 100 * ((data["BM"] + data["BG"]) / common_denominator)
        data["RPT_Beta"] = 100 * ((data["BN"] + data["BH"]) / common_denominator)
        # (AM12+AN12+AO12)/((AT12*V12/100)+(AU12*AA12/100)
        norm_denominator = (data["AT"] * (data["T"] + data["U"])) + (data["AU"] * (data["Y"] + data["Z"]))
        rpt_value = (data["AM"] + data["AN"] + data["AO"]) / norm_denominator
        # R5 Standard Norm : y = 0.0006x2 - 0.0361x + 1.8569
        standard_norm = (0.0006 * data["RPT_Beta"] * data["RPT_Beta"]) - (0.0361 * data["RPT_Beta"]) + 1.8569
        data["RPT_Norm"] = 100 * rpt_value
        data["Standard_Norm"] = standard_norm
        return data["RPT_Beta"], data["RPT_Norm"], data["Standard_Norm"]

    def get_original_recommendation_values(self, data_dict, add_predict):
        """Build lower/upper/original bands around the LIVE values (no optimization).

        :param data_dict: records-style payload convertible to a one-row DataFrame
        :param add_predict: key of the running totalizer to project ahead
        :return: (recommendation_flag, updated_recommendation, predicted_result)
        """
        logger.info("Recommending original trend")
        df_validate = pd.DataFrame(data_dict)
        original_data = df_validate.to_dict(orient="records")[0]
        df_validate = df_validate[PARAMETER_INPUT]
        live_data = df_validate.to_dict(orient="records")[0]
        live_processed_data = df_validate.iloc[0, :].to_numpy().reshape(1, -1)
        live_data_prediction = self.model.predict(live_processed_data)[0]
        # /4 projects a 15-minute window -- presumably the model predicts hourly; TODO confirm.
        live_data_prediction_15_min = live_data_prediction / 4
        logger.debug("Started Calculating the recommendation for the data!")
        # The "recommendation" here is simply the live data itself (original trend).
        df_recommendation = df_validate
        logger.debug("Recommendation Calculated and received!")
        recommendation = df_validate.iloc[0, :].to_numpy().reshape(1, -1)
        rec_array = np.array(recommendation).reshape(1, -1)
        prediction = self.model.predict(rec_array)[0]
        prediction_15_min = prediction / 4
        logger.info("Recommendations generated for last received live data.")
        recommended_data = df_recommendation.to_dict(orient="records")[0]
        # Recommending the upper limit and lower limit factor for the module
        # TI - 50 --> 48 & 52
        updated_recommendation = dict()
        for each_column in recommended_data:
            if each_column in self.original_bound:
                column_delta = self.original_bound[each_column]["max"] - self.original_bound[each_column]["min"]
                percentage_delta_lower = int(100 * (column_delta / self.original_bound[each_column]["min"]))
                percentage_delta_upper = int(100 * (column_delta / self.original_bound[each_column]["max"]))
                percentage_delta = percentage_delta_lower - percentage_delta_upper
                calculated_delta = (0.15 * percentage_delta) / 100
            if each_column in self.original_bound:
                lower_recommendation = recommended_data[each_column] * (1 - calculated_delta)
                upper_recommendation = recommended_data[each_column] * (1 + calculated_delta)
                original_recommendation = recommended_data[each_column]
            else:
                # Unbounded columns get a fixed +/- 5% band.
                lower_recommendation = recommended_data[each_column] * 0.95
                upper_recommendation = recommended_data[each_column] * 1.05
                original_recommendation = recommended_data[each_column]
            updated_recommendation["{}_lower".format(each_column)] = math.floor(lower_recommendation)
            updated_recommendation["{}_upper".format(each_column)] = math.ceil(upper_recommendation)
            updated_recommendation["{}_original".format(each_column)] = round(original_recommendation, 2)
        predicted_result = dict()
        # Totalizer rollover threshold -- NOTE(review): verify against plant configuration.
        totalizer_max_value = 60000000000000
        predicted_totalizer_value = original_data[add_predict] + prediction_15_min
        if predicted_totalizer_value > totalizer_max_value:
            predicted_totalizer_value = predicted_totalizer_value - totalizer_max_value
        predicted_result[add_predict] = predicted_totalizer_value
        au_value_15_min = recommended_data["7302011030 Recovery Column-A.FI-5263A"] / 4
        am_value_15_min = recommended_data["Feed Drum.FIC-5102"] / 4
        an_value_15_min = recommended_data["Feed Drum.FIC-5111"] / 4
        ao_value_15_min = recommended_data["Feed Drum.FIC-5161"] / 4
        beta_prediction, rpt_norm_prediction, predicted_standard_norm = self.find_beta_recommendation(
            at_value=prediction_15_min,
            au_value=au_value_15_min,
            am_value=am_value_15_min,
            an_value=an_value_15_min,
            ao_value=ao_value_15_min)
        live_au_value_15_min = live_data["7302011030 Recovery Column-A.FI-5263A"] / 4
        live_am_value_15_min = live_data["Feed Drum.FIC-5102"] / 4
        live_an_value_15_min = live_data["Feed Drum.FIC-5111"] / 4
        live_ao_value_15_min = live_data["Feed Drum.FIC-5161"] / 4
        beta_live, rpt_norm_live, live_standard_norm = self.find_beta_recommendation(
            at_value=live_data_prediction_15_min,
            au_value=live_au_value_15_min,
            am_value=live_am_value_15_min,
            an_value=live_an_value_15_min,
            ao_value=live_ao_value_15_min)
        predicted_result["ai_yield_Beta Ratio"] = beta_prediction
        predicted_result["ai_yield_Beta Ratio Live"] = beta_live
        predicted_result["ai_yield_Predicted Norm"] = rpt_norm_prediction
        predicted_result["ai_yield_Live Norm"] = rpt_norm_live
        predicted_result["ai_yield_Norm_Delta"] = rpt_norm_prediction - rpt_norm_live
        predicted_result["ai_yield_Live_Standard_Norm"] = live_standard_norm
        predicted_result["ai_yield_Target_Standard_Norm"] = predicted_standard_norm
        predicted_result["ai_yield_current_totalizer_prediction"] = live_data_prediction_15_min
        predicted_result["ai_yield_recommendation_totalizer_prediction"] = prediction_15_min
        predicted_result["ai_yield_Beta_Ratio_Delta"] = beta_prediction - beta_live
        # Recommend only when beta improves without the norm getting worse.
        if (predicted_result["ai_yield_Beta_Ratio_Delta"] >= 0) & (predicted_result["ai_yield_Norm_Delta"] <= 0):
            recommendation_flag = True
        else:
            recommendation_flag = False
        logger.info("Recalculated recommendation flag is {}".format(str(recommendation_flag)))
        return recommendation_flag, updated_recommendation, predicted_result

    @staticmethod
    def update_recommendation_lookup(model_recommendation, engine, profile_data, recommendation_flag,
                                     recommendation_text):
        """Merge model recommendations into the profile and persist the lookup CSV.

        engine and recommendation_flag are currently unused -- the DB writes at
        the bottom are commented out; only the CSV export is active.
        """
        df_recommendation = profile_data
        recommendation_data = df_recommendation.to_dict(orient="records")
        updated_recommendation_data = list()
        for each_data in recommendation_data:
            temp_recommendation_data = each_data.copy()
            parameter = each_data["column"]
            parameter_lower = "{}_lower".format(parameter)
            parameter_upper = "{}_upper".format(parameter)
            parameter_original = "{}_original".format(parameter)
            if parameter_lower in model_recommendation and parameter_upper in model_recommendation:
                temp_recommendation_data["rec_lower"] = model_recommendation[parameter_lower]
                temp_recommendation_data["rec_upper"] = model_recommendation[parameter_upper]
                temp_recommendation_data["rec_original"] = model_recommendation[parameter_original]
            # NOTE(review): appending outside the if keeps rows with no
            # recommendation (their existing rec_* values pass through) -- confirm intent.
            updated_recommendation_data.append(temp_recommendation_data)
        col = ["rec_lower", "rec_upper", "rec_original"]
        df_updated_recommendation = pd.DataFrame(updated_recommendation_data)
        df_updated_recommendation[col] = df_updated_recommendation[col].round(2)
        df_updated_recommendation['rec_status'] = recommendation_text
        df_updated_recommendation.to_csv("{}.csv".format(OUTPUT_DATA["tables"]["lookup"]), index=False)
        full_profile_data = "conf/profile.csv"
        df_full_profile = pd.read_csv(full_profile_data)
        # df_updated_recommendation.to_sql(OUTPUT_DATA["tables"]["lookup"], engine, if_exists="replace")
        # df_full_profile.to_sql(OUTPUT_DATA["tables"]["profile"], engine, if_exists="replace")

    def find_optimum(self, data_dict, add_predict, controllable=False):
        """Search for the optimal controllable set-points for the latest live data.

        :param data_dict: records-style payload convertible to a one-row DataFrame
        :param add_predict: unused in this method (kept for interface parity)
        :param controllable: unused in this method
        :return: (final_recommendation_flag, updated_recommendation, predicted_result)
        """
        logger.info("Finding optimum data for the last received live data.")
        df_validate = pd.DataFrame(data_dict)
        original_data = df_validate.to_dict(orient="records")[0]
        df_validate = df_validate[PARAMETER_INPUT]
        # print("df_validate =", df_validate)
        live_data = df_validate.to_dict(orient="records")[0]
        live_processed_data = df_validate.iloc[0, :].to_numpy().reshape(1, -1)
        # print("model =", self.model)
        live_data_prediction = self.model.predict(df_validate)[0]
        live_data_prediction_15_min = live_data_prediction / 4
        print("Live Data ---> ", data_dict[0])
        # Refresh bounds from the on-disk profile before every optimization run.
        df_profile, live_bounds = self.calculate_current_bounds()
        self.original_bound = live_bounds
        feed_input_cols = self.parameter_info["feed_input"]
        bounds = self.calculate_bounds(live_data=live_data, feed_input_columns=feed_input_cols)
        logger.debug("Started Calculating the recommendation for the data!")
        recommendation = (list(map(float, self.get_recommendation(
            live_processed_data_dict=live_processed_data, bounds=bounds))))
        logger.debug("Recommendation Calculated and received!")
        df_recommendation = pd.DataFrame(columns=list(df_validate.columns), data=[recommendation])
        prediction = self.model.predict(df_recommendation)[0]
        logger.info("Recommendations generated for last received live data.")
        recommended_data = df_recommendation.to_dict(orient="records")[0]
        print("Predicted Value for Live -->", live_data_prediction)
        print("Predicted Value for Recommendation -->", prediction)
        # print("Value Experienced In Actual --> ", data_dict[0][TARGET_VALUE])
        print("Recommended Data -->", recommended_data)
        updated_recommendation = dict()
        for each_column in recommended_data:
            if each_column in self.original_bound:
                column_delta = self.original_bound[each_column]["max"] - self.original_bound[each_column]["min"]
                try:
                    percentage_delta_lower = int(100 * (column_delta / self.original_bound[each_column]["min"]))
                    percentage_delta_upper = int(100 * (column_delta / self.original_bound[each_column]["max"]))
                except:
                    # Division-by-zero fallback when a bound is 0.
                    percentage_delta_lower = column_delta * 2
                    percentage_delta_upper = column_delta
                percentage_delta = percentage_delta_lower - percentage_delta_upper
                calculated_delta = (0.15 * percentage_delta) / 100
            if each_column in self.original_bound:
                # Band is clamped back into the operating bounds.
                lower_recommendation = max(self.original_bound[each_column]["min"],
                                           recommended_data[each_column] * (1 - calculated_delta))
                upper_recommendation = min(self.original_bound[each_column]["max"],
                                           recommended_data[each_column] * (1 + calculated_delta))
                original_recommendation = recommended_data[each_column]
            else:
                lower_recommendation = recommended_data[each_column] * 0.95
                upper_recommendation = recommended_data[each_column] * 1.05
                original_recommendation = recommended_data[each_column]
            updated_recommendation["{}_lower".format(each_column)] = math.floor(lower_recommendation)
            updated_recommendation["{}_upper".format(each_column)] = math.ceil(upper_recommendation)
            updated_recommendation["{}_original".format(each_column)] = round(original_recommendation, 2)
        print("Updated Recommendation", json.dumps(updated_recommendation))
        predicted_result = dict()
        predicted_result["ai_yield_pure CTCMP_predicted"] = prediction
        predicted_result["ai_yield_pure CTCMP_live"] = live_data_prediction
        predicted_result["ai_yield_pure CTCMP_delta"] = prediction - live_data_prediction
        # NOTE(review): both branches currently emit the same status text.
        if predicted_result["ai_yield_pure CTCMP_delta"] >= 0:
            recommendation_flag = True
            final_recommendation_flag = True
            recommendation_text = "Operator to make changes based on recommendations!"
        else:
            recommendation_flag = False
            final_recommendation_flag = False
            recommendation_text = "Operator to make changes based on recommendations!"
        logger.info("Updating lookup with the latest recommendation")
        logger.debug("Attempting to write to the Postgres DB")
        print("Updated Reco:->", updated_recommendation)
        self.update_recommendation_lookup(model_recommendation=updated_recommendation, engine=POSTGRES_URI,
                                          recommendation_flag=recommendation_flag,
                                          recommendation_text=recommendation_text,
                                          profile_data=df_profile)
        logger.info("Recommendations shared for the last received live data.")
        logger.info("Recommendation flag is {}".format(str(recommendation_flag)))
        return final_recommendation_flag, updated_recommendation, predicted_result
from typing import Any, Dict
from pydantic import BaseModel
class AzureDataModel(BaseModel):
    """Payload schema for uploading a blob to Azure storage."""
    # Target blob name within the configured container.
    blob_name: str
    # Arbitrary blob content; serialization is handled by the uploader.
    data: Any


class KafkaDataModel(BaseModel):
    """Envelope schema for messages published to the iLens Kafka topic."""
    # Tag-value pairs for a single reading.
    data: Dict[str, Any]
    site_id: str
    gw_id: str = ''
    pd_id: str = ''
    # Epoch timestamp of the reading (units not shown here; presumably ms -- confirm with producer).
    timestamp: int
    msg_id: int = 0
    partition: str = ''
    retain_flag: bool = False
import pandas as pd
import json
# Input CSVs for the tag-mapping helpers below.
# Paths are relative to this script's location (presumably run from a nested scripts dir -- TODO confirm).
file_path = "../../data/ilens_tags.csv"  # full iLens tag export (Tag ID / Tag Name + hierarchy)
op_file_path = "../../data/model_op_tags.csv"  # model output tag export (parameters + *_parameter_SITE_id)
def get_keys():
    """Print the Tag ID -> Tag Name mapping for the configured equipment hierarchy."""
    df = pd.read_csv(file_path)
    # Narrow the tag list to a single reactor via successive hierarchy filters.
    selected_data = {
        "Site": ['Bharuch-Unit 2'],
        "Plant": ['CTCMP'],
        "Line": ['Stage IIA - Chlorination & Neutralization-I'],
        "Equipment": ['Reactor R-7204']
    }
    for each_level in selected_data:
        df = df.loc[df[each_level].isin(selected_data[each_level])]
    record_data = {row["Tag ID"]: row["Tag Name"] for row in df.to_dict(orient="records")}
    print("******************************************")
    print(json.dumps(record_data))
    print("******************************************")
    print("******************************************")
    print(json.dumps(list(record_data.keys())))
    print("******************************************")
def get_op_tags():
    """Print the mapping of recommendation output keys to their site tag ids."""
    df = pd.read_csv(op_file_path)
    record_data = dict()
    for each_data in df.to_dict(orient="records"):
        # One output key per parameter and band: <parameter>_{lower,upper,original}.
        for range_key in ('lower', 'upper', 'original'):
            record_data[f"{each_data['parameters']}_{range_key}"] = each_data[f"{range_key}_parameter_SITE_id"]
    print("******************************************")
    print(json.dumps(record_data))
    print("******************************************")
# Script entry point: dumps the operational tag mapping (get_keys kept for manual use).
if __name__ == '__main__':
    # get_keys()
    get_op_tags()
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment