aakash.bedi / dalmia_degradation_calculation

Commit b1a1c9a3, authored Feb 28, 2023 by aakash.bedi
Commit message: removed not required functions
Parent: c6fb4cc7
Pipeline: #59482 failed

Showing 7 changed files with 11 additions and 111 deletions (+11 / -111):
app.py                                               +2    -6
scripts/core/data_puller_push/push_data.py           +3    -1
scripts/core/engine/data_training_and_inference.py   +0    -1
scripts/core/engine/final_predictions.py             +0    -0
scripts/core/engine/final_tags.py                    +1   -18
scripts/core/engine/tags_data.py                     +5    -5
scripts/utils/reading_tags.py                        +0   -80
app.py

-import pandas as pd
 if __name__ == "__main__":
     from dotenv import load_dotenv
     load_dotenv(dotenv_path='config.env')
 import warnings
-import numpy as np
 from loguru import logger
 from scripts.core.engine.mppt_data import GetData
 from scripts.core.engine.tags_data import get_tags_data
 from scripts.utils.start_end_date import KairosStartEndDate
 from scripts.utils.preprocessing import DataPreprocessing
-from scripts.core.engine.inv_and_mppt_level import TrainingInference
+from scripts.core.engine.data_training_and_inference import TrainingInference
-from scripts.core.engine.final_tags import GetFinalDf
-from scripts.core.engine.model_training_inference import ai_modelling
+from scripts.core.engine.final_predictions import ai_modelling
 from scripts.core.engine.raw_predicted_tags import get_raw_predicted_tags
 warnings.filterwarnings("ignore")
...
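The import edits above track the two module renames made in this commit: inv_and_mppt_level.py becomes data_training_and_inference.py and model_training_inference.py becomes final_predictions.py, while the imported names TrainingInference and ai_modelling stay the same. app.py also loads its runtime configuration from config.env with python-dotenv before the pipeline modules are imported. A minimal sketch of that loading pattern, assuming a local config.env with KEY=value entries; the KAIROS_HOST key below is a hypothetical illustration, not taken from the project:

    # Sketch of the config-loading pattern used in app.py; KAIROS_HOST is a hypothetical key.
    import os
    from dotenv import load_dotenv

    load_dotenv(dotenv_path='config.env')   # reads KEY=value pairs from config.env into the environment
    kairos_host = os.getenv('KAIROS_HOST')  # returns None if the key is not defined
    print(kairos_host)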
scripts/core/data_puller_push/push_data.py

...
@@ -80,6 +80,8 @@ class KairosWriter(KafkaProducerUtil):
         for k, v in data.items():
             if not k.startswith("site"):
                 continue
+            # This function will return True if the "v" is one of the types in the tuple.
+            # This function will return True if the "v" is one of the types in the tuple
             if isinstance(v, (int, float)) and str(v) not in ('nan', 'inf'):
                 __temp__[k] = v
         return int(timestamp), __temp__
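The added comments describe the guard that filters values before they are written out: isinstance(v, (int, float)) is True only when v is one of the types in the tuple, and the str(v) not in ('nan', 'inf') check drops NaN and positive-infinity values. A self-contained sketch of that filter, using made-up keys modelled on the site_...$tag_... id format seen elsewhere in this commit:

    # Illustrative data; the keys and values are not taken from the project.
    data = {
        'site_107$tag_1': 12.5,
        'site_107$tag_2': float('nan'),   # dropped: str(v) == 'nan'
        'site_107$tag_3': float('inf'),   # dropped: str(v) == 'inf'
        'other_key': 3.0,                 # dropped: key does not start with "site"
    }

    __temp__ = {}
    for k, v in data.items():
        if not k.startswith("site"):
            continue
        # True only when v is one of the types in the tuple (int or float) and not nan/inf
        if isinstance(v, (int, float)) and str(v) not in ('nan', 'inf'):
            __temp__[k] = v

    print(__temp__)   # {'site_107$tag_1': 12.5}

Note that str(float('-inf')) is '-inf', so a negative-infinity value would pass this particular string check.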
scripts/core/engine/inv_and_mppt_level.py → scripts/core/engine/data_training_and_inference.py

 import pandas as pd
 import numpy as np
 from loguru import logger
-from scripts.utils.pycaret_util import PycaretUtil
 from scripts.utils.preprocessing import DataPreprocessing
 from scripts.utils.mlflow_util import ModelLoad
...
scripts/core/engine/model_training_inference.py → scripts/core/engine/final_predictions.py

File moved, no content changes.
scripts/core/engine/final_tags.py

...
@@ -18,24 +18,6 @@ class GetFinalDf:
         except Exception as e:
             logger.exception(f'Exception - {e}')
-
-    @staticmethod
-    def get_predicted_current_tags(tags_excel):
-        try:
-            df = tags_excel.copy()
-            df['parameter_name'] = df['parameter_name'].str.replace('Potential Current MPPT ', 'potential_current_mppt_')
-            df['inv_id'] = df['inv_id'].str.replace('INV ', 'inv_')
-            df['mppt_id'] = df['parameter_name'].copy()
-            df['mppt_id'] = df['mppt_id'].str.replace('potential_current_', '')
-            req_substrings = 'mppt_'
-            data_with_substring = [data for data in df['mppt_id'] if req_substrings in data]
-            df = df.loc[df['mppt_id'].isin(data_with_substring)]
-            df = df.sort_values(['inv_id', 'mppt_id'])
-            df.reset_index(drop=True, inplace=True)
-            return df
-        except Exception as e:
-            logger.exception(f'Exception - {e}')

     @staticmethod
     def get_final_predicted_tags(df_predicted_current_tags, inv_id, mppt_id):
         try:
...
@@ -47,6 +29,7 @@ class GetFinalDf:
             tag_id = df.iloc[index, df.columns.get_loc('tag_id')]
             parameter_name = df.iloc[index, df.columns.get_loc('parameter_name')]
             final_dict['predicted_current_mppt'] = tag_id
+            logger.debug(f'tag_id - {tag_id} & parameter name - {parameter_name}')
             return final_dict
         except Exception as e:
             logger.exception(f'Exception - {e}')
\ No newline at end of file
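The removed get_predicted_current_tags helper was a pandas rename-and-filter step: it normalised 'Potential Current MPPT n' and 'INV n' labels to snake_case, derived an mppt_id column, and kept only rows whose mppt_id contains 'mppt_'. A self-contained sketch of that pattern on illustrative rows (the values are made up, not taken from the plant's tag list):

    import pandas as pd

    # Illustrative input in the shape the removed helper expected
    tags_excel = pd.DataFrame({
        'parameter_name': ['Potential Current MPPT 1', 'Potential Current MPPT 2', 'Potential Power'],
        'inv_id': ['INV 1', 'INV 1', 'INV 2'],
    })

    df = tags_excel.copy()
    df['parameter_name'] = df['parameter_name'].str.replace('Potential Current MPPT ', 'potential_current_mppt_')
    df['inv_id'] = df['inv_id'].str.replace('INV ', 'inv_')
    df['mppt_id'] = df['parameter_name'].str.replace('potential_current_', '')

    # Keep only rows whose derived mppt_id actually refers to an MPPT channel
    data_with_substring = [data for data in df['mppt_id'] if 'mppt_' in data]
    df = df.loc[df['mppt_id'].isin(data_with_substring)]
    df = df.sort_values(['inv_id', 'mppt_id']).reset_index(drop=True)
    print(df)   # the 'Potential Power' row is filtered out

The only addition in this file is the logger.debug line in get_final_predicted_tags, which traces the selected tag_id and parameter name.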
scripts/core/engine/tags_data.py

 import pandas as pd
 from loguru import logger
 from scripts.core.data_puller_push.data_puller import KairosQuery
-from scripts.utils.reading_tags import GetTags
-base_path = 'data_folder'

 def get_tags_data(tags, start_timestamp, end_timestamp):
     try:
-        get_tags = GetTags(base_path=base_path)
         df_merged = pd.DataFrame()
         for inv_id in list(tags['inv_id'].unique()):
-            df_tags_id = get_tags.get_tags_id(df=tags, inv_id=inv_id)
+            df = tags[tags['inv_id'] == inv_id]
+            df_tags_id = df[['tag_id', 'tag_name', 'inv_id', 'parameter_name', 'mppt_id', 'mppt_id_with_equipment']]
+            df_tags_id.reset_index(drop=True, inplace=True)
             tags_dict = df_tags_id[['tag_id', 'parameter_name']].set_index('tag_id').T.to_dict(orient="records")[0]
             tags_dict['site_107$dept_140$line_371$equipment_4115$tag_15828'] = 'tilt_irradiance'
             df_data = KairosQuery(start_timestamp=start_timestamp,
...
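The replacement lines inline what GetTags.get_tags_id used to do (reading_tags.py is deleted below): filter the tag table to one inverter, keep the id and naming columns, and reset the index before building the tag_id to parameter_name mapping. A small sketch of that selection on illustrative rows:

    import pandas as pd

    # Illustrative tag metadata; values are not taken from the project.
    tags = pd.DataFrame({
        'tag_id': ['tag_1', 'tag_2'],
        'tag_name': ['t1', 't2'],
        'inv_id': ['inv_1', 'inv_2'],
        'parameter_name': ['current_mppt_1', 'current_mppt_1'],
        'mppt_id': ['mppt_1', 'mppt_1'],
        'mppt_id_with_equipment': ['current_mppt_1_inv_1', 'current_mppt_1_inv_2'],
    })

    inv_id = 'inv_1'
    # Inline equivalent of the removed GetTags.get_tags_id(df=tags, inv_id=inv_id)
    df = tags[tags['inv_id'] == inv_id]
    df_tags_id = df[['tag_id', 'tag_name', 'inv_id', 'parameter_name', 'mppt_id', 'mppt_id_with_equipment']]
    df_tags_id.reset_index(drop=True, inplace=True)

    # The tag_id -> parameter_name mapping built next in get_tags_data
    tags_dict = df_tags_id[['tag_id', 'parameter_name']].set_index('tag_id').T.to_dict(orient="records")[0]
    print(tags_dict)   # {'tag_1': 'current_mppt_1'}

With the helper inlined, get_tags_data no longer needs base_path or the GetTags import, which is why those lines are removed above.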
scripts/utils/reading_tags.py  (deleted, 100644 → 0)

from loguru import logger
import pandas as pd


class GetTags:
    def __init__(self, base_path):
        self.base_path = base_path

    def read_tag_excel(self):
        try:
            df = pd.read_excel(f'{self.base_path}/tags_download.xlsx')
            df.drop(['Site', 'Plant', 'Line', 'Tag', 'Unit', 'Tag Register', 'System Rules', 'Target',
                     'Target limits', 'Deviation', 'Indicator', 'Lower Limit', 'Upper Limit'],
                    axis=1, inplace=True)
            df.rename(columns={'Tag ID': 'tag_id', 'Tag Name': 'tag_name', 'Equipment': 'inv_id',
                               'Parameter Name': 'parameter_name'}, inplace=True)
            return df
        except Exception as e:
            logger.exception(f'Exception - {e}')

    def get_mppt_tags(self, df, substrings):
        try:
            data_with_substring = self.get_substring_data(substrings=substrings, df=df, column='parameter_name')
            req_data_list = self.removed_substring(substring_data_list=data_with_substring, remove_parameter='Efficiency')
            df = self.get_substring_df(df=df, column='parameter_name', substring_data_list=req_data_list)
            df.reset_index(drop=True, inplace=True)
            df['parameter_name'] = df['parameter_name'].str.replace('Voltage MPPT ', 'voltage_mppt_')
            df['parameter_name'] = df['parameter_name'].str.replace('Current MPPT ', 'current_mppt_')
            df['inv_id'] = df['inv_id'].str.replace('INV ', 'inv_')
            df['inv_id'] = df['inv_id'].str.replace('Plant ', 'plant')
            df['mppt_id'] = df['parameter_name'].copy()
            df['mppt_id'] = df['mppt_id'].str.replace('current_', '')
            df['mppt_id'] = df['mppt_id'].str.replace('voltage_', '')
            df = df.sort_values(['inv_id', 'mppt_id'])
            df['mppt_id_with_equipment'] = df['parameter_name'] + '_' + df['inv_id']
            df.reset_index(drop=True, inplace=True)
            return df
        except Exception as e:
            logger.exception(f'Exception - {e}')

    @staticmethod
    def get_tags_id(df, inv_id):
        try:
            df = df[df['inv_id'] == inv_id]
            df = df[['tag_id', 'tag_name', 'inv_id', 'parameter_name', 'mppt_id', 'mppt_id_with_equipment']]
            df.reset_index(drop=True, inplace=True)
            return df
        except Exception as e:
            logger.exception(f'Exception - {e}')

    @staticmethod
    def get_substring_data(substrings, df, column):
        try:
            req_substrings = substrings
            data_with_substring = [data for data in df[column] if req_substrings in data]
            return data_with_substring
        except Exception as e:
            logger.exception(f'Exception - {e}')

    @staticmethod
    def removed_substring(substring_data_list, remove_parameter):
        try:
            req_data_list = [data for data in substring_data_list if remove_parameter not in data]
            return req_data_list
        except Exception as e:
            logger.exception(f'Exception - {e}')

    @staticmethod
    def get_substring_df(df, column, substring_data_list):
        try:
            df = df.loc[df[column].isin(substring_data_list)]
            df.reset_index(drop=True, inplace=True)
            return df
        except Exception as e:
            logger.exception(f'Exception - {e}')