Commit f3d4565b authored by aakash.bedi

modifications

parent 6bc89d6c
@@ -9,7 +9,34 @@ from datetime import datetime
import pytz
from loguru import logger
from scripts.utils.yield_sheet_3cp_utils.all_tags_3cp import AllTags
-from scripts.utils.yield_sheet_3cp_utils.report_generator_3cp import ReportGenerator
+from scripts.utils.yield_sheet_3cp_utils.report_generator_3cp import ReportGenerator, get_dpr_report_format, \
+    get_dpr
+def form_excel_multiindex(df, df_format):
+    # ebpr_dir = os.path.join(FilePath.report_directory, "ebpr")
+    # if not os.path.exists(ebpr_dir):
+    #     os.mkdir(ebpr_dir)
+    #
+    # output_dir = os.path.join(ebpr_dir, "yield")
+    # if not os.path.exists(output_dir):
+    #     os.mkdir(output_dir)
+    master_output_file = 'test_prod.xlsx'
+    master_output_file = get_dpr(master_output_file=master_output_file, df=df, df_format=df_format)
+    logger.info("XLSX is getting stitched")
+    # logger.info(f"{os.listdir(output_dir)}")
+    if os.path.isfile(master_output_file):
+        return master_output_file, ""
+    else:
+        return None, "File not created"
# __kwargs__.update(
#     start_date=input_data.property.get(
#         "start_date",
@@ -37,20 +64,17 @@ all_tags = config_engine["tag_heirarcy"]
tags_cal, tags_cal_prev, tags_manual, tags_dcs = AllTags().get_tags(all_tags_dictionary=all_tags)
# start_date = datetime.strptime(start_date, '%Y-%m-%d')
# start_date = start_date.astimezone(pytz.UTC)
start_date = datetime.strptime("2022-12-25", '%Y-%m-%d').replace(hour=5, minute=0, second=0,
start_date = datetime.strptime("2023-02-05", '%Y-%m-%d').replace(hour=5, minute=0, second=0,
microsecond=0)
start_date = start_date.astimezone(pytz.timezone("Asia/Kolkata")).replace(hour=5, minute=0, second=0,
microsecond=0)
# end_date = datetime.strptime(end_date, '%Y-%m-%d')
# end_date = end_date.astimezone(pytz.UTC)
end_date = datetime.strptime("2022-12-26", '%Y-%m-%d').replace(hour=5, minute=0, second=0,
end_date = datetime.strptime("2023-03-20", '%Y-%m-%d').replace(hour=5, minute=0, second=0,
microsecond=0)
end_date = end_date.astimezone(pytz.timezone("Asia/Kolkata")).replace(hour=5, minute=0, second=0,
microsecond=0)
-df, message = ReportGenerator(tags_cal=tags_cal, tags_cal_prev=tags_cal_prev, tags_manual=tags_manual,
+df_format, df, message = ReportGenerator(tags_cal=tags_cal, tags_cal_prev=tags_cal_prev, tags_manual=tags_manual,
                                          tags_dcs=tags_dcs, start_date=start_date,
                                          end_date=end_date).yield_report_3cp()
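
One subtlety in the hard-coded window above: strptime returns a naive datetime, and calling astimezone on a naive value first interprets it in the machine's local zone before converting. The usual pytz idiom pins the zone explicitly with localize; a minimal sketch using the new dates from this hunk:

from datetime import datetime
import pytz

ist = pytz.timezone("Asia/Kolkata")
# localize() attaches the zone directly instead of converting from the
# machine's local time; 05:00 IST is the report-day boundary used here.
start_date = ist.localize(datetime.strptime("2023-02-05", '%Y-%m-%d').replace(hour=5))
end_date = ist.localize(datetime.strptime("2023-03-20", '%Y-%m-%d').replace(hour=5))
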
@@ -58,71 +82,5 @@ df, message = ReportGenerator(tags_cal=tags_cal, tags_cal_prev=tags_cal_prev, ta
logger.debug(f'{df.shape}')
logger.debug(f'{message}')
-master_output_file = 'test_prod.xlsx'
-writer = pd.ExcelWriter(master_output_file, engine='xlsxwriter')
-df.to_excel(writer, sheet_name="DPR Sheet", index=True)
-workbook = writer.book
-format = workbook.add_format(
-    {'font_name': 'Trebuchet MS', 'text_wrap': True, 'bold': 2, 'font_color': "blue"})
-format.set_align('center')
-format.set_align('vcenter')
-format1 = workbook.add_format({'font_name': 'Trebuchet MS', 'text_wrap': True})
-format1.set_align('center')
-format1.set_align('vcenter')
-header_footer_format = workbook.add_format({
-    'text_wrap': True
-})
-no_of_rows = df.shape[0]
-worksheet = writer.sheets["DPR Sheet"]
-# set the column width as per your requirement
-worksheet.set_column('A:F', 15, format)
-worksheet.set_column('G:L', 20, format)
-worksheet.set_column('N:T', 22, format)
-worksheet.set_column('U:Z', 22, format)
-worksheet.set_column('AB:AD', 20, format)
-worksheet.set_column('AE:AM', 22, format)
-worksheet.set_column('AN:AO', 29, format)
-worksheet.set_column('AQ:AU', 27, format)
-worksheet.set_column('AV:BC', 33, format)
-worksheet.set_column('BD:BD', 30, format)
-worksheet.set_column('BE:BH', 27, format)
-worksheet.set_column('AY:AY', 40, format)
-worksheet.set_column('BA:BA', 35, format)
-worksheet.set_column('M:M', 10, format)
-worksheet.set_column('T:T', 10, format)
-worksheet.set_column('AD:AD', 10, format)
-worksheet.set_column('AP:AP', 10, format)
-worksheet.set_column('BK:BK', 10, format)
-worksheet.set_column('AA:AA', 10, format)
-worksheet.set_column('AT:AT', 30, format)
-worksheet.set_column('BA:BA', 25, format)
-format4 = workbook.add_format({'bg_color': 'yellow'})
-format5 = workbook.add_format({'text_wrap': True})
-worksheet.set_row(0, 28, format5)
-worksheet.conditional_format(f'A{no_of_rows + 2}:AP{no_of_rows + 2}',
-                             {'type': 'cell', 'criteria': '<=', 'value': 10000000, 'format': format4})
-worksheet.conditional_format(f'AV{no_of_rows + 2}:AZ{no_of_rows + 2}',
-                             {'type': 'cell', 'criteria': '<=', 'value': 10000000, 'format': format4})
-worksheet.conditional_format(f'BB{no_of_rows + 2}:BD{no_of_rows + 2}',
-                             {'type': 'cell', 'criteria': '<=', 'value': 10000000, 'format': format4})
-# worksheet.conditional_format(f'BF{no_of_rows + 2}:BF{no_of_rows + 2}',
-#                              {'type': 'cell', 'criteria': '<=', 'value': 10000000, 'format': format4})
-worksheet.conditional_format(f'AQ{no_of_rows + 3}:AU{no_of_rows + 3}',
-                             {'type': 'cell', 'criteria': '<=', 'value': 10000000, 'format': format4})
-worksheet.conditional_format(f'BA{no_of_rows + 3}:BA{no_of_rows + 3}',
-                             {'type': 'cell', 'criteria': '<=', 'value': 10000000, 'format': format4})
-worksheet.conditional_format(f'BE{no_of_rows + 3}:BG{no_of_rows + 3}',
-                             {'type': 'cell', 'criteria': '<=', 'value': 10000000, 'format': format4})
-writer.save()
-if not master_output_file.endswith(".xlsx"):
-    master_output_file = master_output_file + ".xlsx"
-logger.info("XLSX is getting stitched")
+form_excel_multiindex(df=df, df_format=df_format)
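
The worksheet styling removed above now presumably lives behind get_dpr_report_format. For reference, the same conditional-format pattern in a self-contained form, using a context manager instead of the explicit writer.save() call that pandas 2.x removed in favour of close():

import pandas as pd

df = pd.DataFrame({'A': [1.0, 2.0], 'B': [3.0, 4.0]})  # stand-in frame
no_of_rows = df.shape[0]
with pd.ExcelWriter('test_prod.xlsx', engine='xlsxwriter') as writer:
    df.to_excel(writer, sheet_name="DPR Sheet", index=True)
    highlight = writer.book.add_format({'bg_color': 'yellow'})
    worksheet = writer.sheets["DPR Sheet"]
    # highlight the row two below the data (the totals row in this report)
    worksheet.conditional_format(f'A{no_of_rows + 2}:AP{no_of_rows + 2}',
                                 {'type': 'cell', 'criteria': '<=',
                                  'value': 10000000, 'format': highlight})
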
[KAIROS_DB]
uri = $KAIROS_URI
[LOGGING]
level = $LOG_LEVEL
traceback = $LOG_TRACEBACK
-KAIROS_URI= https://iLens:iLensJUB$456@jub-kairos.ilens.io/kairos
+KAIROS_URI=https://iLens:iLensJUB$456@jub-kairos.ilens.io/kairos
LOG_LEVEL=INFO
LOG_TRACEBACK=true
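
The first file keeps $KAIROS_URI-style placeholders while the second carries literal values, which points at environment-variable substitution happening before configparser reads the text. A sketch of one way that wiring could look; the conf file name here is an assumption:

import configparser
import os

# Expand $KAIROS_URI, $LOG_LEVEL, ... from the environment, then parse.
with open('application.conf') as fh:
    raw = fh.read()

config = configparser.ConfigParser()
config.read_string(os.path.expandvars(raw))
kairos_uri = config["KAIROS_DB"]["uri"]
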
This diff is collapsed.
@@ -43,6 +43,11 @@ class DB:
uri = config["KAIROS_DB"]["uri"]
class Logging:
level = config.get("LOGGING", "level", fallback="INFO")
level = level or "INFO"
tb_flag = config.getboolean("LOGGING", "traceback", fallback=True)
tb_flag = tb_flag or True
......
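
A side effect of the new guards worth noting: getboolean(..., fallback=True) already handles a missing key, and because False or True evaluates to True, the tb_flag guard means a traceback = false setting can never take effect. A two-line check makes that visible:

tb_flag = False           # what getboolean returns for "traceback = false"
tb_flag = tb_flag or True
assert tb_flag is True    # the configured value is effectively ignored
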
@@ -21,7 +21,7 @@ class Kairos_query:
"tags": {
"c3": tag
},
"name": "project_227__ilens.live_data.raw",
"name": "ilens.live_data.raw",
"group_by": [
{
"name": "tag",
@@ -32,7 +32,7 @@ class Kairos_query:
{
    "name": "last",
    "sampling": {
-        "value": "1",
+        "value": "2",
        "unit": "minutes"
    }
}
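
For reference, the two snippets above sit inside a standard KairosDB query body; a sketch of the whole request with the shortened metric name and the widened two-minute "last" sampling — the URL, timestamps, and tag ids are placeholders:

import json
import requests

query = {
    "start_absolute": 1675569000000,    # epoch milliseconds
    "end_absolute": 1675569600000,
    "metrics": [{
        "name": "ilens.live_data.raw",
        "tags": {"c3": ["tag_id_1", "tag_id_2"]},
        "group_by": [{"name": "tag", "tags": ["c3"]}],
        "aggregators": [{
            "name": "last",
            "sampling": {"value": "2", "unit": "minutes"},
        }],
    }],
}
response = requests.post("https://kairos.example/api/v1/datapoints/query",
                         data=json.dumps(query))
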
@@ -52,7 +52,6 @@ class Kairos_query:
output = {}
try:
    response = requests.post(self.kairos_url, data=json.dumps(query))
-    abb = response.json()
    grouped_output_data = response.json()["queries"][0]["results"]
    for each_grouped_data in grouped_output_data:
        value = (each_grouped_data["values"])
@@ -63,11 +62,10 @@
else:
    for k, v in tags_dict.items():
        output[v] = 0
-# return output
try:
    output[tags_dict[tag_id]] = round(value[0][1], 2)
except Exception as e:
-    logger.exception(f"Exception occurred for tag = {tag_id} and date = {date}", exc_info=True)
+    logger.exception(f"Exception - {e}", exc_info=True)
    output[tags_dict[tag_id]] = 0
return output
except Exception as e:
......
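
One note on the reworded log call above: loguru's logger.exception already records the active traceback on its own, so the stdlib-style exc_info=True argument carries no extra information there. A self-contained sketch of the same fallback path:

from loguru import logger

value, tag_id, tags_dict, output = [], "t1", {"t1": "FlowMeter"}, {}
try:
    output[tags_dict[tag_id]] = round(value[0][1], 2)  # IndexError: no datapoints
except Exception:
    # loguru attaches the traceback by itself; no exc_info flag needed
    logger.exception(f"Exception - no value for tag = {tag_id}")
    output[tags_dict[tag_id]] = 0
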
@@ -5,53 +5,85 @@ from loguru import logger
from scripts.utils.yield_sheet_3cp_utils.data_puller_3cp import Kairos_query
-class ManualDcsData:
-    def __init__(self, start_date, end_date, tags_cal, tags_cal_prev, tags_manual, tags_dcs):
+class AllTagsDataPuller:
+    def __init__(self, start_date, end_date):
        self.start_date = start_date
        self.end_date = end_date
-        self.tags_cal = tags_cal
-        self.tags_cal_prev = tags_cal_prev
-        self.tags_manual = tags_manual
-        self.tags_dcs = tags_dcs

-    def manual_dcs_dataframe(self):
+    def get_kairos_data(self, tags_lst, tags_dict):
        try:
-            all_manual_dcs_tags_dict, all_cal_tags_dict = TagsDict().all_tags(self.tags_cal, self.tags_cal_prev,
-                                                                              self.tags_manual, self.tags_dcs)
-            all_manual_dcs_tags = list(all_manual_dcs_tags_dict.values())
-            # logger.info(f"No of manual and dcs tags = {len(all_manual_dcs_tags)}")
-            # All calculated tags combined
-            all_calculated_tags = list(all_cal_tags_dict.values())
-            # logger.info(f"No of all calculated tags = {len(all_calculated_tags)}")
-            all_tags = [*all_calculated_tags, *all_manual_dcs_tags]
-            all_tags_dict = {**all_cal_tags_dict, **all_manual_dcs_tags_dict}
            # All dates for which we are going to do calculations
            all_dates = [self.start_date + timedelta(days=x) for x in range((self.end_date - self.start_date).days + 1)]
            # all_dates.insert(0, self.start_date)
            logger.debug(f"Data required for dates : {all_dates}")
-            # Pulling data for all manual and dcs dates for required dates
            date_output = {}
            for dates in all_dates:
                try:
-                    periodic_start_time = dates
-                    logger.info(f"Pulling Manual and DCS data for date - {periodic_start_time}")
+                    periodic_start_time = dates - timedelta(minutes=0)
+                    logger.info(f"Pulling data for date - {periodic_start_time}")
                    current_date = periodic_start_time
                    periodic_start_time = int(periodic_start_time.timestamp()) * 1000
-                    periodic_end_time = int(dates.timestamp()) * 1000
+                    periodic_end_time = dates + timedelta(minutes=10)
+                    periodic_end_time = int(periodic_end_time.timestamp()) * 1000
                    query_manual_dcs = Kairos_query().kairos_query(start=periodic_start_time, end=periodic_end_time,
-                                                                   tag=all_manual_dcs_tags)
+                                                                   tag=tags_lst)
+                    logger.info(f"{query_manual_dcs}")
                    data_manual_dcs = Kairos_query().get_data_from_kairos(query=query_manual_dcs,
-                                                                          tags_dict=dict((v, k)
-                                                                                         for k, v in all_manual_dcs_tags_dict.items()),
+                                                                          tags_dict=dict((v, k) for k, v in
+                                                                                         tags_dict.items()),
                                                                          date=current_date)
+                    date_output[current_date] = data_manual_dcs
                except Exception as e:
                    logger.exception(f"Exception occurred", exc_info=True)
                    date_output[dates] = data_manual_dcs
            return date_output
        except Exception as e:
-            logger.exception(f"Exception occurred - {e}", exc_info=True)
\ No newline at end of file
+            logger.exception(f'Exception - {e}')
+def get_dataframe(date_dict, tags_lst):
+    try:
+        df = pd.DataFrame(index=[i for i in range(len(date_dict))], columns=tags_lst)
+        df['Date'] = list(date_dict.keys())
+        col = df.pop("Date")
+        df.insert(0, col.name, col)
+        for index in range(df.shape[0]):
+            for params in df.columns:
+                if params not in ['Date']:
+                    present_date = df.iloc[index, df.columns.get_loc('Date')]
+                    available_tags = list(date_dict[present_date].keys())
+                    if params not in available_tags:
+                        df.iloc[index, df.columns.get_loc(params)] = None
+                    else:
+                        df.iloc[index, df.columns.get_loc(params)] = date_dict[present_date][params]
+        df.sort_values('Date', inplace=True)
+        df.reset_index(drop=True, inplace=True)
+        df = df.astype({'Date': str,
+                        '7302011030_Consumptions_Closing_DPR': float, 'D1D001Readings_T_2703_A_DPR': float,
+                        'D1D001Readings_T_2703_B_DPR': float, 'D1D001Readings_Conv_DPR': float,
+                        'D1D001_consumptions_Day_Receipt_DPR': float, '7302011030_Consumptions_Day_Receipt_DPR': float,
+                        '7302011061_Consumption_Day_Receipt_DPR': float, '7302011061_Consumption_Day_Cons_DPR': float,
+                        'Pure_Production_Day_Nia_DPR': float, 'Pure_Production_Day_Drum_Filling_DPR': float,
+                        'Pure_Production_Pure_tank_Dead_Volumes_DPR': float, 'Utility_report_Day_Power': float,
+                        'Utility_report_Day_Steam': float, 'Utility_report_Day_Raffinate': float,
+                        'Utility_report_Vent_Gas_Raffinate': float, 'Utility_report_Day_DM': float,
+                        'Utility_report_Day_Treated_Water': float, 'Utility_report_Raffinate_Incinerated': float,
+                        'Beta_Purification_Column_C_2409_Outlet_Flow_TZ': float, 'D1D001Readings_T_2101_A_DPR': float,
+                        'D1D001Readings_T_2101_B_DPR': float, 'Pure_Production_LT_2701_A_DPR': float,
+                        'Pure_Production_LT_2701_B_DPR': float, 'D1D001_consumptions_Opening_DPR': float,
+                        'D1D001_consumptions_Day_Cons_DPR': float, '7302011030_Consumptions_Opening_DPR': float,
+                        '7302011030_Consumptions_Day_Cons_DPR': float, '7302011061_Consumption_Opening_DPR': float,
+                        'Crude_Prod_Day_Prod_DPR': float, 'Pure_Production_Opening_DPR': float,
+                        'Pure_Production_Day_Prod_DPR': float, 'Utility_report_Power_Norms': float,
+                        'Utility_report_Steam_Norms': float, 'Utility_report_Raffinate_Norms': float,
+                        'Utility_report_Raffinate_Vent_Gas': float, 'Utility_report_Raw_Water_Norms': float,
+                        'Utility_report_per_hr_burn_rate': float, 'D1D001_consumptions_Closing_DPR': float,
+                        'Utility_report_Actual_Ammonia_Norms': float, 'Utility_report_Actual_Beta_Norms': float,
+                        'Utility_report_Actual_Benzene_Norms': float, 'Utility_report_Day_DM_norm': float,
+                        'D1D001_consumptions_Day_Recovery_DPR': float, 'D1D001_consumptions_Total_Receipt_DPR': float,
+                        '7302011030_Consumptions_Total_Receipt_DPR': float,
+                        '7302011030_Consumptions_Total_Cons_DPR': float,
+                        '7302011061_Consumption_Closing_DPR': float, '7302011061_Consumption_Total_Receipt_DPR': float,
+                        '7302011061_Consumption_Total_Cons_DPR': float, 'Crude_Prod_Total_Prod_DPR': float,
+                        'Pure_Production_Closing_of_Pure_Tanks_only_DPR': float,
+                        'Pure_Production_Total_Prod_DPR': float, 'Pure_Production_Total_Nia_DPR': float,
+                        'Pure_Production_Total_Drum_Filling_DPR': float})
+        return df
+    except Exception as e:
+        logger.exception(f'Exception - {e}')
\ No newline at end of file
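
Taken together, the renamed puller and the new get_dataframe would be driven roughly as below; the class and function names come from this hunk, the wiring is an assumption. pandas can also build the same frame straight from the date-keyed dict, which sidesteps the cell-by-cell fill loop:

import pandas as pd

puller = AllTagsDataPuller(start_date=start_date, end_date=end_date)
date_dict = puller.get_kairos_data(tags_lst=list(tags_dict.values()),
                                   tags_dict=tags_dict)

# Loop-free equivalent of get_dataframe's fill step:
df = pd.DataFrame.from_dict(date_dict, orient='index')
df.index.name = 'Date'
df = df.sort_index().reset_index()
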
@@ -3,8 +3,8 @@ from loguru import logger
class ReorderRename:
-    def __init__(self,df_ebpr):
-        self.df_ebpr=df_ebpr
+    def __init__(self, df_ebpr):
+        self.df_ebpr = df_ebpr

    def reorder_rename(self):
        try:
            df1 = self.df_ebpr[['Date',
......
No preview for this file type