[4]:
import pandas as pd
from bc_combined_modelling import utils
[5]:
year = 2046
scenario = 'NUC_standard_2035'
[6]:
from bc_combined_modelling.clews_to_pypsa import FutureTechInvestments
clews_result_extraction_args = {
'year': year,
'scenario_name': scenario,
'storage_algorithm': 'Kotzur',
'timeslices': 24, # set default as 24
'solver_name': 'gurobi'
}
clews_investments=FutureTechInvestments(**clews_result_extraction_args)
---------------------------------------------------------------------------
FileNotFoundError Traceback (most recent call last)
Cell In[6], line 9
1 from bc_combined_modelling.clews_to_pypsa import FutureTechInvestments
2 clews_result_extraction_args = {
3 'year': year,
4 'scenario_name': scenario,
(...) 7 'solver_name': 'gurobi'
8 }
----> 9 clews_investments=FutureTechInvestments(**clews_result_extraction_args)
File /local-scratch/localhome/mei3/eliasinul/work/BC_Combined_Modelling/bc_combined_modelling/clews_to_pypsa.py:20, in FutureTechInvestments.__init__(self, year, scenario_name, storage_algorithm, timeslices, solver_name)
17 self.timeslices=timeslices
18 self.solver_name:str=solver_name
---> 20 self.aparser=AttributesParser()
21 self.clewb_cfg = self.aparser.clewsb_config
22 self.get_data_paths()
File <string>:4, in __init__(self, combined_model_config_path)
File /local-scratch/localhome/mei3/eliasinul/work/BC_Combined_Modelling/bc_combined_modelling/attributes_parser.py:36, in AttributesParser.__post_init__(self)
32 self.clews_builder_skeleton_source:str|Path=Path('models/BC_Nexus/config/clews_builder_skeleton.yaml')
34 # Load the user configuration master file by using the method
35 # self.data_cfg:Dict[str:dict]=AttributesParser.load_config(self.data_cfg_path)
---> 36 self.cm_config:Dict[str,dict] = AttributesParser.load_config(self.combined_model_config_path)
37 self.clewsb_config:Dict[str,dict] = AttributesParser.load_config(self.clews_builder_config_path)
38 self.clews_cm:Dict= self.cm_config.get('clews','clews')
File /local-scratch/localhome/mei3/eliasinul/work/BC_Combined_Modelling/bc_combined_modelling/attributes_parser.py:47, in AttributesParser.load_config(config_file_path)
42 @staticmethod
43 def load_config(config_file_path):
44 """
45 Loads the yaml file as dictionary and extracts the attributes to pass on child classes.
46 """
---> 47 with open(config_file_path, 'r') as file:
48 data = yaml.safe_load(file)
49 return data
FileNotFoundError: [Errno 2] No such file or directory: 'config/config.yaml'
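The traceback shows that `AttributesParser` opens `config/config.yaml` via a relative path, so the lookup depends on the notebook's current working directory. A minimal workaround sketch, assuming the project root (the directory containing `config/config.yaml`) sits one level above the notebook; the exact layout is an assumption:
[ ]:
import os
from pathlib import Path

# Assumption: the project root holding config/config.yaml is the notebook's parent directory.
project_root = Path.cwd().parent
if (project_root / 'config' / 'config.yaml').exists():
    os.chdir(project_root)  # let AttributesParser resolve its relative config paths
else:
    print('config/config.yaml not found under', project_root, '- adjust project_root before retrying')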
[ ]:
clews_cap_builds = pd.read_csv('../results/clews/Model_Kotzur_CNZ_LIMITED_CO2/8ts_csvs_gurobi/TotalCapacityAnnual.csv')
# Keep power-plant technologies (PWR*) but drop transmission (PWRTRN) entries
PWR_capacity = clews_cap_builds[
    clews_cap_builds['TECHNOLOGY'].str.startswith('PWR')
    & ~clews_cap_builds['TECHNOLOGY'].str.contains('PWRTRN')
]
PWR_capacity_year = PWR_capacity[PWR_capacity['YEAR'] == year]
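A quick sanity check (a sketch, not part of the original workflow) to confirm the target year is present and to see which PWR technologies report capacity; it assumes the standard OSeMOSYS result columns `YEAR`, `TECHNOLOGY`, and `VALUE`:
[ ]:
# Assumption: YEAR, TECHNOLOGY and VALUE are the result columns written by the CLEWs run.
print(sorted(PWR_capacity['YEAR'].unique())[-5:])                      # last few reported years
print(PWR_capacity_year.sort_values('VALUE', ascending=False).head(10))  # largest capacities in the target year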
[ ]:
cfg = utils.load_config('../config/clews_builder.yaml')
# PWRWND and PWRSOL each hold a dictionary of individual wind/solar resource options
techs_wnd = cfg['TECHNOLOGIES']['PWRWND']
techs_sol = cfg['TECHNOLOGIES']['PWRSOL']
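For reference, the fields used later in this notebook suggest each resource-option entry looks roughly like the sketch below; the exact schema of `clews_builder.yaml` may differ, so treat the values as placeholders:
[ ]:
# Assumed shape of one entry under cfg['TECHNOLOGIES']['PWRSOL'] (keys inferred from their use below)
example_entry = {
    'name': 'solar_cluster_02',   # hypothetical cluster name matching the resource-option CSV index
    'status': 'future',           # 'existing' or 'future'
    'operational_life': 25,
    'start_year': 2030,
    'potential': 1.5,             # developable capacity in GW
}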
[ ]:
existing_techs_wnd = {k: v for k, v in techs_wnd.items() if v.get('status') == 'existing'}
[ ]:
len(existing_techs_wnd.keys())
10
[ ]:
future_techs_wnd = {k: v for k, v in techs_wnd.items() if v.get('status') == 'future'}
future_techs_wnd_resource_options = {k: v for k, v in future_techs_wnd.items() if int(k[-2:]) > 11}  # exclude the existing sites (suffix <= 11)
future_techs_sol = {k: v for k, v in techs_sol.items() if v.get('status') == 'future'}
future_techs_sol_resource_options = {k: v for k, v in future_techs_sol.items() if int(k[-1:]) > 1}  # exclude the existing sites (suffix <= 1)
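The suffix slicing above (`k[-2:]` for wind, `k[-1:]` for solar) assumes fixed-width numeric IDs. A regex-based alternative that extracts the trailing number regardless of width, as a sketch (same thresholds assumed, not part of the original code):
[ ]:
import re

def trailing_id(tech_id: str) -> int:
    """Return the trailing integer of a technology ID, e.g. 'PWRWND12' -> 12; -1 if none."""
    match = re.search(r'(\d+)$', tech_id)
    return int(match.group(1)) if match else -1

# Equivalent filters, assuming wind IDs above 11 and solar IDs above 1 denote new resource options
future_techs_wnd_resource_options = {k: v for k, v in future_techs_wnd.items() if trailing_id(k) > 11}
future_techs_sol_resource_options = {k: v for k, v in future_techs_sol.items() if trailing_id(k) > 1}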
[ ]:
resource_options_solar = pd.read_csv('../results/linking/resource_options_solar.csv', index_col='cluster_id')
resource_options_wind = pd.read_csv('../results/linking/resource_options_wind.csv', index_col='cluster_id')
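The joins below rely on the `name` field in the CLEWs config matching the `cluster_id` index of these resource-option tables. A quick check, as a sketch assuming that naming convention, can surface mismatches before the join silently produces NaNs:
[ ]:
# Assumption: v_info['name'] is meant to equal a cluster_id in the resource-option CSVs.
sol_names = {v['name'] for v in future_techs_sol_resource_options.values()}
missing = sol_names - set(resource_options_solar.index)
if missing:
    print('Solar resource options with no matching cluster_id:', sorted(missing))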
[ ]:
# Build the solar investment mapping directly from the row dicts; this avoids concatenating
# onto an empty, all-NA DataFrame, which is deprecated and raised a FutureWarning.
cols = ['clews_id', 'name', 'operational_life', 'start_year',
        'potential_capacity_GW', 'clews_investment_year', 'invested_capacity_GW']
rows = []
for k_techid, v_info in future_techs_sol_resource_options.items():
    invested = PWR_capacity_year.loc[PWR_capacity_year['TECHNOLOGY'] == k_techid, 'VALUE']
    rows.append({
        'clews_id': k_techid,
        'name': v_info['name'],
        'operational_life': v_info['operational_life'],
        'start_year': v_info['start_year'],
        'potential_capacity_GW': v_info['potential'],
        'clews_investment_year': year,
        'invested_capacity_GW': invested.values[0] if not invested.empty else 0,
    })
future_techs_sol_mapping = pd.DataFrame(rows, columns=cols).set_index('name')
# Exclude columns that already exist in `resource_options_solar`, then join on the index
non_overlapping_cols = future_techs_sol_mapping.loc[:, ~future_techs_sol_mapping.columns.isin(resource_options_solar.columns)]
resource_options_solar = resource_options_solar.join(non_overlapping_cols, how='left')
resource_options_solar.to_csv(f'../data/processed_data/solar/potential/resource_options_investments_solar_{year}.csv')
[ ]:
# Build the wind investment mapping the same way, again avoiding the deprecated concat
# onto an empty, all-NA DataFrame that triggered a FutureWarning.
cols = ['clews_id', 'name', 'operational_life', 'start_year',
        'potential_capacity_GW', 'clews_investment_year', 'invested_capacity_GW']
rows = []
for k_techid, v_info in future_techs_wnd_resource_options.items():
    invested = PWR_capacity_year.loc[PWR_capacity_year['TECHNOLOGY'] == k_techid, 'VALUE']
    rows.append({
        'clews_id': k_techid,
        'name': v_info['name'],
        'operational_life': v_info['operational_life'],
        'start_year': v_info['start_year'],
        'potential_capacity_GW': v_info['potential'],
        'clews_investment_year': year,
        'invested_capacity_GW': invested.values[0] if not invested.empty else 0,
    })
future_techs_wnd_mapping = pd.DataFrame(rows, columns=cols).set_index('name')
# Exclude columns that already exist in `resource_options_wind`, then join on the index
non_overlapping_cols = future_techs_wnd_mapping.loc[:, ~future_techs_wnd_mapping.columns.isin(resource_options_wind.columns)]
resource_options_wind = resource_options_wind.join(non_overlapping_cols, how='left')
resource_options_wind.to_csv(f'../data/processed_data/wind/potential/resource_options_investments_wind_{year}.csv')
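Since the solar and wind cells repeat the same mapping logic, a shared helper could remove the duplication. A sketch only; the function name and signature are not part of the original code:
[ ]:
def build_investment_mapping(resource_options, capacity_year_df, investment_year):
    """Map CLEWs resource options to invested capacities for one year (hypothetical helper)."""
    rows = []
    for tech_id, info in resource_options.items():
        invested = capacity_year_df.loc[capacity_year_df['TECHNOLOGY'] == tech_id, 'VALUE']
        rows.append({
            'clews_id': tech_id,
            'name': info['name'],
            'operational_life': info['operational_life'],
            'start_year': info['start_year'],
            'potential_capacity_GW': info['potential'],
            'clews_investment_year': investment_year,
            'invested_capacity_GW': invested.values[0] if not invested.empty else 0,
        })
    return pd.DataFrame(rows).set_index('name')

# Example usage with the same inputs as the cells above:
# sol_mapping = build_investment_mapping(future_techs_sol_resource_options, PWR_capacity_year, year)
# wnd_mapping = build_investment_mapping(future_techs_wnd_resource_options, PWR_capacity_year, year)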