Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
O
oee-services
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Analytics
CI / CD Analytics
Repository Analytics
Value Stream Analytics
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
harshavardhan.c
oee-services
Commits
9b1da609
Commit
9b1da609
authored
Jun 02, 2022
by
harshavardhan.c
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
OEE Dashboard Fixes.
parent
03e2a5aa
Changes
12
Hide whitespace changes
Inline
Side-by-side
Showing
12 changed files
with
188 additions
and
239 deletions
+188
-239
live_dashboard.py
live_dashboard.py
+72
-90
production_watchdog.py
production_watchdog.py
+0
-84
scripts/core/engine/oee_aggregator.py
scripts/core/engine/oee_aggregator.py
+67
-42
scripts/core/handlers/api_handler.py
scripts/core/handlers/api_handler.py
+12
-14
scripts/core/handlers/batch_oee_calc_handler.py
scripts/core/handlers/batch_oee_calc_handler.py
+0
-2
scripts/core/handlers/common_handler.py
scripts/core/handlers/common_handler.py
+22
-0
scripts/core/handlers/layout_handler.py
scripts/core/handlers/layout_handler.py
+1
-1
scripts/core/handlers/tag_handler.py
scripts/core/handlers/tag_handler.py
+2
-0
scripts/db/mongo/schema/tag_hierarchy.py
scripts/db/mongo/schema/tag_hierarchy.py
+1
-0
scripts/schemas/batch_oee.py
scripts/schemas/batch_oee.py
+3
-3
scripts/utils/common_utils.py
scripts/utils/common_utils.py
+7
-1
scripts/utils/kafka_util.py
scripts/utils/kafka_util.py
+1
-2
No files found.
live_dashboard.py
View file @
9b1da609
from
scripts.utils.kafka_util
import
DataPush
if
__name__
==
'__main__'
:
from
dotenv
import
load_dotenv
load_dotenv
()
import
os
import
time
from
datetime
import
datetime
import
pytz
from
production_monitoring
import
ProductionMonitor
prod_mon
=
ProductionMonitor
()
data_push
=
DataPush
()
tag_mapping
=
{
"oee"
:
"site_100$dept_100$line_100$equipment_101$tag_215"
,
"availability"
:
"site_100$dept_100$line_100$equipment_101$tag_216"
,
"performance"
:
"site_100$dept_100$line_100$equipment_101$tag_217"
,
"quality"
:
"site_100$dept_100$line_100$equipment_101$tag_218"
,
"running_lot"
:
"site_100$dept_100$line_100$equipment_101$tag_219"
,
"running_item"
:
"site_100$dept_100$line_100$equipment_101$tag_220"
,
"target"
:
"site_100$dept_100$line_100$equipment_101$tag_222"
,
"downtime"
:
"site_100$dept_100$line_100$equipment_101$tag_223"
,
"setup_time"
:
"site_100$dept_100$line_100$equipment_101$tag_225"
,
"running_time"
:
"site_100$dept_100$line_100$equipment_101$tag_226"
}
from
datetime
import
datetime
,
timedelta
from
scripts.constants
import
CommonConstants
,
TagCategoryConstants
from
scripts.core.engine.oee_calculator
import
OEEEngine
from
scripts.core.handlers.batch_oee_calc_handler
import
CalculateBatchOEEHandler
from
scripts.core.handlers.common_handler
import
CommonHandler
from
scripts.logging
import
logger
from
scripts.schemas.batch_oee
import
MachineOEERequest
,
BatchOEEData
,
OEEDataInsertRequest
,
OEEDataSaveRequest
from
scripts.utils.common_utils
import
CommonUtils
from
scripts.utils.kafka_util
import
DataPush
def
oee_update
():
data
=
prod_mon
.
oee_mongo
.
find_record_by_not_status
(
"completed"
)
if
not
data
:
print
(
"No jobs are running, waiting for job to start..."
)
class
MachineOEECalculator
:
def
__init__
(
self
,
project_id
=
None
):
self
.
common_util
=
CommonUtils
()
self
.
batch_oee_handler
=
CalculateBatchOEEHandler
()
self
.
common_handler
=
CommonHandler
(
project_id
=
project_id
)
self
.
oee_engine
=
OEEEngine
()
self
.
data_push
=
DataPush
()
def
calculate_machine_oee
(
self
,
request_data
:
MachineOEERequest
):
try
:
hierarchy_dict
=
self
.
common_handler
.
get_valid_oee_monitoring_hierarchy
(
project_id
=
request_data
.
project_id
)
now
=
datetime
.
today
()
-
timedelta
(
days
=
2
)
oee_start_time
=
datetime
.
strptime
(
request_data
.
monitor_time
,
'
%
H:
%
M'
)
.
replace
(
year
=
now
.
year
,
month
=
now
.
month
,
day
=
now
.
day
)
.
strftime
(
CommonConstants
.
USER_META_TIME_FORMAT
)
oee_end_time
=
datetime
.
now
()
.
strftime
(
CommonConstants
.
USER_META_TIME_FORMAT
)
for
k
,
v
in
hierarchy_dict
.
items
():
site_id
=
k
.
split
(
"$"
)[
0
]
downtime
=
self
.
common_util
.
get_downtime_details_by_hierarchy
(
hierarchy
=
k
,
project_id
=
request_data
.
project_id
)
input_data
=
OEEDataInsertRequest
(
prod_start_time
=
oee_start_time
,
prod_end_time
=
oee_end_time
,
downtime
=
downtime
,
hierarchy
=
k
,
cycle_time
=
os
.
environ
.
get
(
"CYCLE_TIME"
,
default
=
5
),
tz
=
request_data
.
tz
,
project_id
=
request_data
.
project_id
)
input_data
.
total_units
,
input_data
.
reject_units
=
self
.
batch_oee_handler
.
get_data_for_tags
(
input_data
=
input_data
)
oee_response
:
BatchOEEData
=
self
.
oee_engine
.
start_batch_oee_calc
(
request_data
=
OEEDataSaveRequest
(
**
input_data
.
dict
()))
data_dict
=
{
v
[
TagCategoryConstants
.
OEE_OUTPUT_CATEGORY
]:
oee_response
.
oee
,
v
[
TagCategoryConstants
.
OEE_OUTPUT_PERFORMANCE_CATEGORY
]:
oee_response
.
performance
,
v
[
TagCategoryConstants
.
OEE_OUTPUT_QUALITY_CATEGORY
]:
oee_response
.
quality
,
v
[
TagCategoryConstants
.
OEE_OUTPUT_QUALITY_LOSS_CATEGORY
]:
oee_response
.
quality_loss
,
v
[
TagCategoryConstants
.
OEE_OUTPUT_PERFORMANCE_LOSS_CATEGORY
]:
oee_response
.
performance_loss
,
v
[
TagCategoryConstants
.
OEE_OUTPUT_AVAILABILITY_CATEGORY
]:
oee_response
.
availability
,
v
[
TagCategoryConstants
.
OEE_OUTPUT_AVAILABILITY_LOSS_CATEGORY
]:
oee_response
.
availability_loss
}
message_dict
=
{
"data"
:
data_dict
,
"site_id"
:
site_id
,
"gw_id"
:
""
,
"pd_id"
:
""
,
"p_id"
:
request_data
.
project_id
,
"timestamp"
:
int
(
time
.
time
()
*
1000
),
"msg_id"
:
1
,
"retain_flag"
:
False
}
self
.
data_push
.
publish_message
(
message_dict
)
except
Exception
as
e
:
logger
.
exception
(
f
"Exception Occurred while calculating oee for the hierarchy {e.args}"
)
return
print
(
f
"Calculating OEE for {data.get('job')}"
)
data_dict
=
{}
if
data
.
get
(
"run_start_time"
):
run_start_time
=
datetime
.
fromtimestamp
(
data
.
get
(
"run_start_time"
)
//
1000
,
tz
=
pytz
.
timezone
(
"Asia/Bangkok"
)
)
downtime
=
prod_mon
.
automation_engine
.
get_downtime
(
run_start_time
=
run_start_time
,
production_end_time
=
datetime
.
now
(
tz
=
pytz
.
timezone
(
"Asia/Bangkok"
))
)
else
:
downtime
=
0
oee
,
availability
,
performance
,
quality
=
prod_mon
.
calculate_oee_params
(
data
,
downtime
)
data_dict
.
update
(
{
tag_mapping
.
get
(
"running_lot"
):
data
.
get
(
"job"
,
""
),
# job no
tag_mapping
.
get
(
"running_item"
):
data
.
get
(
"item"
,
""
),
# item no
tag_mapping
.
get
(
"target"
):
data
.
get
(
"qty_released"
,
0
),
# quality released
tag_mapping
.
get
(
"oee"
):
oee
,
tag_mapping
.
get
(
"availability"
):
availability
,
tag_mapping
.
get
(
"performance"
):
performance
,
tag_mapping
.
get
(
"quality"
):
quality
,
tag_mapping
.
get
(
"downtime"
):
downtime
,
tag_mapping
.
get
(
"setup_time"
):
data
.
get
(
"setup_time"
,
0
),
tag_mapping
.
get
(
"running_time"
):
data
.
get
(
"running_time"
,
0
),
}
)
message_dict
=
{
"data"
:
data_dict
,
"site_id"
:
prod_mon
.
settings
[
"automation"
][
"site_id"
],
"gw_id"
:
""
,
"pd_id"
:
""
,
"p_id"
:
prod_mon
.
settings
[
"automation"
][
"project_id"
],
"timestamp"
:
int
(
time
.
time
()
*
1000
),
"msg_id"
:
1
,
"retain_flag"
:
False
}
data_push
.
publish_message
(
message_dict
)
good_count
,
units_produced
=
prod_mon
.
get_current_produced_count
()
running_time
=
(
datetime
.
now
()
-
datetime
.
fromtimestamp
(
data
.
get
(
"start_time"
)
/
1000
))
.
total_seconds
()
/
60
mongo_data
=
{
"good_count"
:
good_count
,
"units_produced"
:
units_produced
,
"running_time"
:
running_time
}
data
.
update
(
mongo_data
)
prod_mon
.
oee_mongo
.
update_oee
(
data
,
data
.
get
(
"job"
,
""
),
data
.
get
(
"uf_process"
,
""
),
False
)
if
__name__
==
'__main__'
:
while
True
:
oee_update
()
time
.
sleep
(
3
)
projects_list
=
os
.
environ
.
get
(
"OEE_PROJECTS"
,
default
=
"project_170"
)
monitor_start_time
=
os
.
environ
.
get
(
"OEE_START_TIME"
,
default
=
"00:00"
)
for
project
in
projects_list
.
split
(
","
):
MachineOEECalculator
()
.
calculate_machine_oee
(
request_data
=
MachineOEERequest
(
project_id
=
project
,
monitor_time
=
"00:00"
,
tz
=
"Asia/Kolkata"
))
time
.
sleep
(
10
)
production_watchdog.py
deleted
100644 → 0
View file @
03e2a5aa
if
__name__
==
'__main__'
:
from
dotenv
import
load_dotenv
load_dotenv
()
import
os
import
time
from
datetime
import
datetime
,
timedelta
from
scripts.constants
import
CommonConstants
,
TagCategoryConstants
from
scripts.core.engine.oee_calculator
import
OEEEngine
from
scripts.core.handlers.batch_oee_calc_handler
import
CalculateBatchOEEHandler
from
scripts.core.handlers.common_handler
import
CommonHandler
from
scripts.logging
import
logger
from
scripts.schemas.batch_oee
import
MachineOEERequest
,
BatchOEEData
,
OEEDataInsertRequest
,
OEEDataSaveRequest
from
scripts.utils.common_utils
import
CommonUtils
from
scripts.utils.kafka_util
import
DataPush
class MachineOEECalculator:
    """Computes per-hierarchy machine OEE and publishes the results to Kafka.

    For every valid OEE-monitoring hierarchy of a project, the calculator
    pulls unit counts and downtime, runs the batch OEE engine, and pushes
    the resulting metrics keyed by the hierarchy's output tag ids.
    """

    def __init__(self, project_id=None):
        # Collaborators are created eagerly; CommonHandler is scoped to the
        # project so hierarchy lookups stay project-local.
        self.common_util = CommonUtils()
        self.batch_oee_handler = CalculateBatchOEEHandler()
        self.common_handler = CommonHandler(project_id=project_id)
        self.oee_engine = OEEEngine()
        self.data_push = DataPush()

    def calculate_machine_oee(self, request_data: MachineOEERequest):
        """Calculate and publish OEE for every monitored hierarchy.

        :param request_data: carries ``project_id``, ``monitor_time``
            (an ``HH:MM`` string marking the window start) and ``tz``.
        :returns: ``None``; results are published as a side effect.
        """
        try:
            hierarchy_dict = self.common_handler.get_valid_oee_monitoring_hierarchy(
                project_id=request_data.project_id
            )
            # Window starts at monitor_time on *yesterday's* date and runs
            # until "now"; both are formatted with the shared meta format.
            reference_day = datetime.today() - timedelta(days=1)
            window_start = datetime.strptime(request_data.monitor_time, '%H:%M').replace(
                year=reference_day.year,
                month=reference_day.month,
                day=reference_day.day,
            )
            oee_start_time = window_start.strftime(CommonConstants.USER_META_TIME_FORMAT)
            oee_end_time = datetime.now().strftime(CommonConstants.USER_META_TIME_FORMAT)
            for hierarchy_key, tag_map in hierarchy_dict.items():
                # Hierarchy keys look like "site_x$dept_y$..."; the first
                # segment identifies the site for the outgoing message.
                site_id = hierarchy_key.split("$")[0]
                downtime = self.common_util.get_downtime_details_by_hierarchy(
                    hierarchy=hierarchy_key,
                    project_id=request_data.project_id,
                )
                input_data = OEEDataInsertRequest(
                    prod_start_time=oee_start_time,
                    prod_end_time=oee_end_time,
                    downtime=downtime,
                    hierarchy=hierarchy_key,
                    cycle_time=os.environ.get("CYCLE_TIME", default=5),
                    tz=request_data.tz,
                    project_id=request_data.project_id,
                )
                input_data.total_units, input_data.reject_units = (
                    self.batch_oee_handler.get_data_for_tags(input_data=input_data)
                )
                oee_response: BatchOEEData = self.oee_engine.start_batch_oee_calc(
                    request_data=OEEDataSaveRequest(**input_data.dict())
                )
                # Pair each output-tag category with its computed metric and
                # key the payload by the hierarchy's concrete tag id.
                output_metric_pairs = (
                    (TagCategoryConstants.OEE_OUTPUT_CATEGORY, oee_response.oee),
                    (TagCategoryConstants.OEE_OUTPUT_PERFORMANCE_CATEGORY, oee_response.performance),
                    (TagCategoryConstants.OEE_OUTPUT_QUALITY_CATEGORY, oee_response.quality),
                    (TagCategoryConstants.OEE_OUTPUT_QUALITY_LOSS_CATEGORY, oee_response.quality_loss),
                    (TagCategoryConstants.OEE_OUTPUT_PERFORMANCE_LOSS_CATEGORY, oee_response.performance_loss),
                    (TagCategoryConstants.OEE_OUTPUT_AVAILABILITY_CATEGORY, oee_response.availability),
                    (TagCategoryConstants.OEE_OUTPUT_AVAILABILITY_LOSS_CATEGORY, oee_response.availability_loss),
                )
                data_dict = {tag_map[category]: value for category, value in output_metric_pairs}
                message_dict = {
                    "data": data_dict,
                    "site_id": site_id,
                    "gw_id": "",
                    "pd_id": "",
                    "p_id": request_data.project_id,
                    "timestamp": int(time.time() * 1000),
                    "msg_id": 1,
                    "retain_flag": False,
                }
                self.data_push.publish_message(message_dict)
        except Exception as e:
            logger.exception(f"Exception Occurred while calculating oee for the hierarchy {e.args}")
            return
if
__name__
==
'__main__'
:
while
True
:
projects_list
=
os
.
environ
.
get
(
"OEE_PROJECTS"
,
default
=
"project_170"
)
monitor_start_time
=
os
.
environ
.
get
(
"OEE_START_TIME"
,
default
=
"00:00"
)
for
project
in
projects_list
.
split
(
","
):
MachineOEECalculator
()
.
calculate_machine_oee
(
request_data
=
MachineOEERequest
(
project_id
=
project
,
monitor_time
=
"00:00"
,
tz
=
"Asia/Kolkata"
))
time
.
sleep
(
10
)
scripts/core/engine/oee_aggregator.py
View file @
9b1da609
from
copy
import
deepcopy
import
pandas
as
pd
from
scripts.schemas.batch_oee
import
ChartResponse
,
ChartDBResponse
from
scripts.config
import
DBConf
from
scripts.core.engine.oee_calculator
import
OEETagFinder
from
scripts.core.handlers.batch_oee_calc_handler
import
CalculateBatchOEEHandler
from
scripts.core.handlers.common_handler
import
CommonHandler
from
scripts.db.mongo.schema.tag_hierarchy
import
GetTagsLists
,
OutputTagsList
from
scripts.errors
import
DataNotFound
from
scripts.logging
import
logger
from
scripts.schemas.batch_oee
import
ChartResponse
,
ChartDBResponse
,
ChartRequest
,
OEEDataInsertRequest
from
scripts.utils.common_utils
import
CommonUtils
from
scripts.utils.kairos_db_util
import
BaseQuery
from
scripts.utils.kairos_db_util.df_formation_util
import
create_kairos_df
from
scripts.utils.kairos_db_util.query_kairos
import
KairosQuery
class
OEEAggregator
:
def
__init__
(
self
):
def
__init__
(
self
,
project_id
=
None
):
self
.
common_util
=
CommonUtils
()
self
.
base_query
=
BaseQuery
()
self
.
oee_tag_finder
=
OEETagFinder
()
self
.
common_handler
=
CommonHandler
(
project_id
=
project_id
)
self
.
oee_handler
=
CalculateBatchOEEHandler
(
project_id
=
project_id
)
def
processor
(
self
,
data
):
db_response
=
ChartDBResponse
(
**
data
)
...
...
@@ -24,43 +40,52 @@ class OEEAggregator:
chart_response
=
ChartResponse
(
**
db_response
.
dict
())
return
chart_response
.
dict
()
@staticmethod
def aggregator(data, activity_length=1):
    """Aggregate per-batch OEE rows into one overall chart payload.

    :param data: DataFrame-compatible rows with ``batch_start_time`` /
        ``batch_end_time`` (epoch milliseconds), ``total_units``,
        ``reject_units``, ``cycle_time``, ``downtime`` and
        ``productive_time`` columns.
    :param activity_length: units consumed per batch activity; subtracted
        once per batch row from the reported unit totals.
    :returns: ``ChartResponse`` dict with internal-only fields removed.
    """
    df = pd.DataFrame(data)
    # Epoch-millisecond span -> minutes.
    df["total_time"] = (df["batch_end_time"] - df["batch_start_time"]) / 60000
    df["actual_cycle"] = df["total_units"] / df["total_time"]
    df["ideal_cycle"] = df["cycle_time"]
    df["good_units"] = df["total_units"] - df["reject_units"]
    # Time lost to rejects at the ideal rate (minutes per unit).
    df["reject_time"] = df["reject_units"] * (1 / df["ideal_cycle"])
    agg_oee = df.sum().round(2)
    # Classic OEE factors. NOTE(review): zero total_time / total_units /
    # (total_time - downtime) divides by zero here -- confirm upstream
    # guarantees non-degenerate batches.
    availability = (agg_oee["total_time"] - agg_oee["downtime"]) / agg_oee["total_time"]
    performance = agg_oee["productive_time"] / (agg_oee["total_time"] - agg_oee["downtime"])
    quality = (agg_oee["total_units"] - agg_oee["reject_units"]) / agg_oee["total_units"]
    oee_overall = round(availability * performance * quality, 2) * 100
    availability_loss = agg_oee["downtime"] / agg_oee["total_time"] * 100
    quality_loss = agg_oee["reject_time"] / agg_oee["total_time"] * 100
    chart_response = ChartResponse(
        total_units=round(agg_oee["total_units"] - (len(df) * activity_length)),
        reject_units=agg_oee["reject_units"],
        oee=oee_overall,
        availability=round(availability * 100, 2),
        downtime=agg_oee["downtime"],
        performance=round(performance * 100, 2),
        quality=round(quality * 100, 2),
        actual_cycle=agg_oee["actual_cycle"],
        ideal_cycle=agg_oee["ideal_cycle"],
        good_units=round(agg_oee["good_units"] - (len(df) * activity_length)),
        availability_loss=availability_loss,
        quality_loss=quality_loss,
        performance_loss=round(100 - availability_loss - quality_loss - oee_overall, 2),
        total_time=agg_oee["total_time"],
        productive_time=agg_oee["productive_time"],
    )
    filtered = chart_response.dict()
    # Strip internal-only fields before returning; plain loop instead of a
    # side-effect-only list comprehension.
    for each in ("productive_time", "downtime", "reject_units"):
        filtered.pop(each, None)
    return filtered
def aggregator(self, request_data: ChartRequest):
    """Query Kairos for a hierarchy's tag data over the requested range
    and prime an ``OEEDataInsertRequest`` with the fetched unit counts.

    NOTE(review): this instance method shadows the ``aggregator``
    staticmethod defined earlier in the class (later ``def`` wins in
    Python) -- confirm the staticmethod is intentionally dead or rename.

    :param request_data: chart request carrying ``queryDate`` (ISO date
        pair), ``tz``, ``project_id`` and ``hierarchy``.
    :returns: ``{}`` when no output tags resolve; otherwise ``None``
        (only side effects on ``input_schema`` are visible here).
    """
    try:
        # Boundaries converted to epoch milliseconds in the caller's tz.
        start_time = int(self.common_util.pendulum_conversion(
            request_data.queryDate[0], tz=request_data.tz, timestamp=True)) * 1000
        end_time = int(self.common_util.pendulum_conversion(
            request_data.queryDate[-1], tz=request_data.tz, timestamp=True)) * 1000
        hierarchy_tags = self.common_handler.tag_hierarchy_handler.get_tags_list_by_hierarchy(
            GetTagsLists(**request_data.dict()))
        total_units_tag_id = self.oee_tag_finder.get_total_units_tag_id(input_data=hierarchy_tags)
        reject_units_tag_id = self.oee_tag_finder.get_reject_units_tag_id(input_data=hierarchy_tags)
        output_tags_dict = self.common_handler.tag_hierarchy_handler.get_output_tags_for_oee(
            input_data=OutputTagsList(**request_data.dict()))
        if not output_tags_dict or not output_tags_dict.get(request_data.hierarchy):
            return {}
        updated_dict = self.common_handler.validate_hierarchy_tags(
            output_tags_dict[request_data.hierarchy])
        new_columns_dict = self.common_handler.get_oee_keys_mapping_dict(
            output_tags_dict[request_data.hierarchy])
        tags_list = list(updated_dict.values())
        # Group-by list additionally carries the default full tag.
        group_by_tags_list = deepcopy(tags_list)
        group_by_tags_list.append(DBConf.KAIROS_DEFAULT_FULL_TAG)
        tags_list.extend([total_units_tag_id, reject_units_tag_id])
        if not tags_list:
            return {}
        kairos_util = KairosQuery(url=DBConf.KAIROS_URL)
        data = kairos_util.query(self.base_query.form_generic_query(
            tags_list=tags_list,
            project_id=request_data.project_id,
            start_epoch=start_time,
            end_epoch=end_time))
        master_df = pd.DataFrame()
        # Normalize: Kairos may return a single response or a list of them.
        data = [data] if not isinstance(data, list) else data
        for each_data in data:
            master_df = create_kairos_df(
                master_df=master_df,
                response_data=each_data,
                tags_list=tags_list,
                group_by_tags=group_by_tags_list,
                tz=request_data.tz)
        if master_df.empty:
            raise DataNotFound
        master_df_columns = list(master_df.columns)
        input_data = {"prod_start_time": start_time, "prod_end_time": end_time}
        input_data.update(request_data.dict(exclude_none=True))
        input_schema = OEEDataInsertRequest(**input_data)
        input_schema.total_units, input_schema.reject_units = (
            self.oee_handler.get_data_for_tags(input_data=input_schema))
    except Exception as e:
        # BUG FIX: was ``logger.execption`` (AttributeError inside the
        # except handler, masking the original error).
        logger.exception(f'Exception occurred while plotting the dashboard {e.args}')
scripts/core/handlers/api_handler.py
View file @
9b1da609
...
...
@@ -95,23 +95,21 @@ class APIHandler:
if
not
request_data
.
hierarchy
:
return
dict
()
chart_maker
=
ChartMaker
()
data
=
table_obj
.
get_chart_data
(
hierarchy
=
request_data
.
hierarchy
,
prod_start_time
=
request_data
.
queryDate
[
0
],
prod_end_time
=
request_data
.
queryDate
[
1
],
reference_id
=
request_data
.
reference_id
,
aggregation
=
request_data
.
aggregation
,
tz
=
request_data
.
tz
)
if
not
request_data
.
aggregation
or
len
(
data
)
==
1
:
if
isinstance
(
data
,
list
):
if
request_data
.
reference_id
:
data
=
table_obj
.
get_chart_data
(
hierarchy
=
request_data
.
hierarchy
,
prod_start_time
=
request_data
.
queryDate
[
0
],
prod_end_time
=
request_data
.
queryDate
[
1
],
reference_id
=
request_data
.
reference_id
,
tz
=
request_data
.
tz
)
if
isinstance
(
data
,
list
)
and
data
:
data
=
data
[
0
]
raw_data
=
self
.
oee_agg
.
processor
(
data
)
return
chart_maker
.
main_creator
(
raw_data
,
overall
=
False
)
elif
len
(
data
)
==
0
:
raw_data
=
self
.
oee_agg
.
processor
(
data
)
return
chart_maker
.
main_creator
(
raw_data
,
overall
=
False
)
return
dict
()
else
:
agg_data
=
self
.
oee_agg
.
aggregator
(
data
)
agg_data
=
self
.
oee_agg
.
aggregator
(
request_data
=
request_
data
)
return
chart_maker
.
main_creator
(
agg_data
)
except
Exception
as
e
:
...
...
scripts/core/handlers/batch_oee_calc_handler.py
View file @
9b1da609
...
...
@@ -127,8 +127,6 @@ class CalculateBatchOEEHandler:
datetime
.
strptime
(
input_data
.
prod_end_time
,
CommonConstants
.
USER_META_TIME_FORMAT
)
.
astimezone
(
tz
=
pytz
.
timezone
(
input_data
.
tz
))
.
timestamp
())
*
1000
hierarchy_tags
=
self
.
tag_hierarchy_handler
.
get_tags_list_by_hierarchy
(
GetTagsLists
(
**
input_data
.
dict
()))
# hierarchy_tags = {TagCategoryConstants.TOTAL_UNITS_CATEGORY: "site_114$line_1306$equipment_5812$tag_100",
# TagCategoryConstants.REJECT_UNITS_CATEGORY: "site_114$line_1306$equipment_5812$tag_60538"}
total_units_tag_id
=
self
.
oee_tag_finder
.
get_total_units_tag_id
(
input_data
=
hierarchy_tags
)
reject_units_tag_id
=
self
.
oee_tag_finder
.
get_reject_units_tag_id
(
input_data
=
hierarchy_tags
)
kairos_util
=
KairosQuery
(
url
=
DBConf
.
KAIROS_URL
)
...
...
scripts/core/handlers/common_handler.py
View file @
9b1da609
...
...
@@ -64,3 +64,25 @@ class CommonHandler:
except
Exception
as
e
:
logger
.
exception
(
f
'Exception Occurred while validating the tags for a hierarchy {e.args}'
)
return
{}
@staticmethod
def get_oee_keys_mapping_dict(input_dict: dict):
    """Invert a category->tag-id dict into a tag-id->column-name dict.

    Only the categories listed below are mapped; unknown categories are
    silently dropped, matching the original if/elif chain.
    NOTE(review): OEE_OUTPUT_PERFORMANCE_LOSS_CATEGORY is intentionally
    kept absent (it was not handled by the original chain) -- confirm
    whether "performance_loss" should also be mapped.

    :param input_dict: mapping of ``TagCategoryConstants`` category to
        the hierarchy's tag id for that category.
    :returns: dict of tag id -> OEE column name.
    """
    # Data-driven replacement for the original 8-branch if/elif chain.
    category_to_key = {
        TagCategoryConstants.TOTAL_UNITS_CATEGORY: "total_units",
        TagCategoryConstants.REJECT_UNITS_CATEGORY: "reject_units",
        TagCategoryConstants.OEE_OUTPUT_CATEGORY: "oee",
        TagCategoryConstants.OEE_OUTPUT_AVAILABILITY_CATEGORY: "availability",
        TagCategoryConstants.OEE_OUTPUT_PERFORMANCE_CATEGORY: "performance",
        TagCategoryConstants.OEE_OUTPUT_QUALITY_CATEGORY: "quality",
        TagCategoryConstants.OEE_OUTPUT_QUALITY_LOSS_CATEGORY: "quality_loss",
        TagCategoryConstants.OEE_OUTPUT_AVAILABILITY_LOSS_CATEGORY: "availability_loss",
    }
    return {v: category_to_key[k] for k, v in input_dict.items() if k in category_to_key}
scripts/core/handlers/layout_handler.py
View file @
9b1da609
...
...
@@ -10,7 +10,7 @@ class LayoutHandler:
async
def
save_layout
(
self
,
layout_request
:
SaveLayoutRequest
):
try
:
data
=
self
.
oee_layout_conn
.
update_layout
(
data
=
layout_request
.
dict
(),
project_id
=
layout_request
.
project_id
data
=
layout_request
.
dict
(),
project_id
=
layout_request
.
project_id
,
upsert
=
True
)
return
data
except
Exception
:
...
...
scripts/core/handlers/tag_handler.py
View file @
9b1da609
...
...
@@ -28,6 +28,8 @@ class TagHierarchyHandler:
hierarchy_str
=
re
.
escape
(
"|"
.
join
([
f
'{_each}$tag'
for
_each
in
input_data
.
hierarchy_list
]))
elif
input_data
.
hierarchy_level
:
hierarchy_str
=
input_data
.
hierarchy_level
elif
input_data
.
hierarchy
:
hierarchy_str
=
re
.
escape
(
f
'{input_data.hierarchy}$tag'
)
else
:
return
{}
aggregate_query
=
TagHierarchyAggregate
.
tag_aggregate_by_hierarchy_list
(
project_id
=
input_data
.
project_id
,
...
...
scripts/db/mongo/schema/tag_hierarchy.py
View file @
9b1da609
...
...
@@ -12,6 +12,7 @@ class OutputTagsList(BaseModel):
project_id
:
str
hierarchy_list
:
Optional
[
List
]
hierarchy_level
:
Optional
[
str
]
hierarchy
:
Optional
[
str
]
class
Config
:
schema_extra
=
{
...
...
scripts/schemas/batch_oee.py
View file @
9b1da609
from
datetime
import
datetime
from
typing
import
Optional
,
Union
,
List
from
pydantic
import
BaseModel
,
validator
...
...
@@ -42,10 +43,10 @@ class WaterFallChart(BaseModel):
class
ChartRequest
(
BaseModel
):
project_id
:
str
queryDate
:
List
[
str
]
queryDate
:
List
[
str
]
=
[
datetime
.
now
()
.
replace
(
hour
=
00
,
minute
=
00
,
second
=
00
),
datetime
.
now
()
.
replace
(
hour
=
23
,
minute
=
59
,
second
=
59
)]
hierarchy
:
Optional
[
str
]
reference_id
:
Optional
[
str
]
aggregation
:
Optional
[
bool
]
=
False
tz
:
Optional
[
str
]
=
"Asia/kolkata"
class
Config
:
...
...
@@ -70,7 +71,6 @@ class ChartDBResponse(BaseModel):
reject_units
:
int
oee
:
int
availability
:
float
downtime
:
int
performance
:
int
performance_loss
:
float
quality
:
int
...
...
scripts/utils/common_utils.py
View file @
9b1da609
...
...
@@ -5,7 +5,7 @@ import pytz
import
shortuuid
from
scripts.config
import
PathToServices
from
scripts.constants
import
Secrets
,
EndpointConstants
,
UOM
from
scripts.constants
import
Secrets
,
EndpointConstants
,
UOM
,
CommonConstants
from
scripts.db.redis_connections
import
project_details_db
from
scripts.logging
import
logger
from
scripts.utils.auth_util
import
ILensRequest
,
AuthenticationError
...
...
@@ -103,6 +103,12 @@ class CommonUtils:
localized_dt
=
localized_tz
.
localize
(
datetime_with_tz
)
return
localized_dt
@staticmethod
def pendulum_conversion(date_str, tz, output_format=CommonConstants.USER_META_TIME_FORMAT, timestamp=False):
    """Parse *date_str* in timezone *tz* via pendulum.

    :param date_str: date/time string accepted by ``pendulum.parse``.
    :param tz: timezone name applied during parsing.
    :param output_format: ``strftime`` format for the string result.
    :param timestamp: when True, return a POSIX timestamp (float)
        instead of a formatted string.
    """
    parsed = pendulum.parse(date_str, tz=tz)
    if timestamp:
        return parsed.timestamp()
    return parsed.strftime(output_format)
@staticmethod
def get_next_id(_=None) -> str:
    """Return a new short, URL-safe unique identifier.

    The unused parameter keeps the signature callable from mapping-style
    callers that always pass one argument.
    """
    next_id = shortuuid.uuid()
    return next_id
...
...
scripts/utils/kafka_util.py
View file @
9b1da609
from
ilens_kafka_publisher.v2
import
KafkaPublisher
from
scripts.config
import
KafkaConf
from
scripts.db.redis_connections
import
partition_db
from
scripts.logging
import
logger
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment