From 1b6dd72bbd3a42a5bb3e52b383cdd5f3a55b010f Mon Sep 17 00:00:00 2001 From: Jean-Didier Date: Tue, 3 Jan 2023 20:10:50 +0100 Subject: [PATCH] first working deployment version --- grouping/PPO_cg.py | 1624 ++++++++--------- polymorphic_solver/src/api.py | 2 +- polymorphic_solver/src/app.py | 28 +- polymorphic_solver/src/morphemic.py | 58 +- ...SpringRestExample-1.0.0-BUILD-SNAPSHOT.war | Bin 23050261 -> 23049514 bytes 5 files changed, 860 insertions(+), 852 deletions(-) diff --git a/grouping/PPO_cg.py b/grouping/PPO_cg.py index cdd42d35..121749ff 100644 --- a/grouping/PPO_cg.py +++ b/grouping/PPO_cg.py @@ -37,894 +37,859 @@ uri_notification = None request_type = None full_filepath = None -components_indexes_map = {} - -def get_component_name_from_solutions(solutions): - global components_indexes_map - result = [] - for solution in solutions: - solution_with_name = [] - for component_index in solution: - solution_with_name.append(components_indexes_map[component_index-1]) - result.append(solution_with_name) - return result - -def groupings(file_): - global filename_ - filename_ = file_ - - visitedList = [[]] - - - #env variables that are used inside the program - activemq_hostname = os.environ.get("ACTIVEMQ_HOST", "localhost") - activemq_port = int(os.environ.get("ACTIVEMQ_PORT", "61613")) - activemq_username = os.environ.get("ACTIVEMQ_USERNAME", "morphemic") - activemq_password = os.environ.get("ACTIVEMQ_PASSWORD", "morphemic") - amq_topic_to_send = os.environ.get("AMQ_TOPIC_REQUEST_CG", "/topic/solver_ug_request") - amq_topic_utility_receive = os.environ.get("AMQ_TOPIC_CG_REPLY", "/topic/solver_ug_reply") - - def create_input_utility(sol_1): - - resources_values=[] - resources_components=[('CPU', 'GPU', 'FPGA', 'RAM')] - total_resources=[] - list_to_tuple=[] - r_f_list=[] - cpu, ram = [],[] - cut_=[] - CPU, GPU, FPGA, RAM = 0,0,0,0 - for element in range(len(app.data['Application_components'])): - for e in app.data['Application_components'][element]['Resources']: - for k,v in e.items(): - if k == 'CPU': - CPU= int(list(v[1].values())[0]) - elif k == 'GPU': - GPU=int(list(v[1].values())[0]) - elif k == 'FPGA': - FPGA=int(list(v[1].values())[0]) - elif k == 'RAM': - RAM=int(list(v[1].values())[0]) - - total_resources.append((CPU, GPU, FPGA, RAM)) - CPU, GPU, FPGA, RAM = 0,0,0,0 +def set_component_names_from_solutions(filename_, solutions): - for element in total_resources: - list_to_tuple.append(list(element)) - + with open(filename_, 'r+') as f: + data = json.load(f) - for element in sol_1: - for idx,e in enumerate(element): - cpu.append(list_to_tuple[e-1][0]) - ram.append(list_to_tuple[e-1][3]) - + no_of_components = data['Number_of_components'] + component_index_map={} + name_of_components=[] + for element in range(no_of_components): + name_of_components.append(data['Application_components'][element]['name']) + i=0 - for i in range(len(element)): + for i in range(no_of_components): + component_index_map.update({i+1:name_of_components[i]}) - r_f_list.append({'_cores':max(cpu)}) - r_f_list.append({'_mem': max(ram)}) - r_f_list.append({'_instances': int(list(list(app.data['Application_components'][i]['Horizontal scaling requirements'][0].values())[0][1].values())[0])}) - r_f_list.append({'_variant': "[]"}) - r_f_list.append({'_hardware':'CPU'}) - - cut_.append(r_f_list) - r_f_list=[] - cpu, ram = [], [] - + for element in solutions: + for idx,e in enumerate(element): + element[idx] = component_index_map[e] + return solutions + +def groupings(file_): + time.sleep(1200) - return 
total_resources, {'target': 'utility', 'sender_id': get_random_ug_id(), 'combination': sol_1, 'variables': cut_[0]} - - def hardware_solutions(solutions): - keep_resources_gpu=[] - keep_resources_fpga=[] - for element in solutions: - total_resources= create_input_utility(element)[0] - for e in element: - if len(e) ==1: - if total_resources[e[0]-1][1] !=0: - keep_resources_gpu.append(element) - for e in element: - if len(e) ==1: - if total_resources[e[0]-1][2] !=0: - keep_resources_fpga.append(element) - return keep_resources_gpu, keep_resources_fpga - + while True: + global filename_ + filename_ = file_ - def sorted_list(list_solutions): - list_to_be_changed=[] - solutions=[] - final_solutions=[] - for idx,element in enumerate(list_solutions): - for e in element: - e.sort() - list_to_be_changed.append(e) - solutions.append(list_to_be_changed) - list_to_be_changed=[] - - for idx, element in enumerate(solutions): - x = sorted(element, key=lambda x: x[0]) - final_solutions.append(x) + visitedList = [[]] - return final_solutions - def create_map_actions(solutions): - solutions_string_type=[] - solution_number=[] - for idx,element in enumerate(solutions): + #env variables that are used inside the program + activemq_hostname = os.environ.get("ACTIVEMQ_HOST", "localhost") + activemq_port = int(os.environ.get("ACTIVEMQ_PORT", "61613")) + activemq_username = os.environ.get("ACTIVEMQ_USERNAME", "morphemic") + activemq_password = os.environ.get("ACTIVEMQ_PASSWORD", "morphemic") + amq_topic_to_send = os.environ.get("AMQ_TOPIC_REQUEST_CG", "/topic/solver_ug_request") + amq_topic_utility_receive = os.environ.get("AMQ_TOPIC_CG_REPLY", "/topic/solver_ug_reply") - solution_number.append(idx) - solutions_string_type.append(str(element)) + def create_input_utility(sol_1): - return solution_number, solutions_string_type + resources_values=[] + resources_components=[('CPU', 'GPU', 'FPGA', 'RAM')] + total_resources=[] + list_to_tuple=[] + r_f_list=[] + cpu, ram = [],[] + cut_=[] + CPU, GPU, FPGA, RAM = 0,0,0,0 + for element in range(len(app.data['Application_components'])): + for e in app.data['Application_components'][element]['Resources']: + for k,v in e.items(): + if k == 'CPU': + CPU= int(list(v[1].values())[0]) + elif k == 'GPU': + GPU=int(list(v[1].values())[0]) + elif k == 'FPGA': + FPGA=int(list(v[1].values())[0]) + elif k == 'RAM': + RAM=int(list(v[1].values())[0]) + + total_resources.append((CPU, GPU, FPGA, RAM)) + CPU, GPU, FPGA, RAM = 0,0,0,0 - def create_str_groupings(additional_resources): - for idx, element in enumerate(additional_resources): - additional_resources[idx] = str(element) - - return additional_resources - - def remove_duplicates(x): - return list(dict.fromkeys(x)) - - - def MAP_ACTIONS_RESOURCES(solutions): - initial_actions = [] - code_actions=[] - cpu_combinations = [] - total_combinations= [] - for idx, element in enumerate(solutions): - total_resources = create_input_utility(element)[0] - combination_input_utility = create_input_utility(element)[1] - cpu_combinations.append((idx, combination_input_utility)) - gpu_solutions, fpga_solutions = hardware_solutions(solutions) - solutions = sorted_list(solutions) - gpu_solutions = sorted_list(gpu_solutions) - gpu_solutions = create_str_groupings(gpu_solutions) - gpu_solutions = remove_duplicates(gpu_solutions) - action_numbr_list, groupings_list = create_map_actions(solutions) - number_of_actions, original_groupings = create_map_actions(solutions) - for element in original_groupings: - if gpu_solutions!=None: - for e in gpu_solutions: - 
if e == element: - idx = original_groupings.index(element) - - new_action = create_input_utility(ast.literal_eval(original_groupings[idx]))[1] - combination = new_action['combination'] - - - for element in combination: - if len(element) ==1: - index_ = element[0]-1 - if total_resources[index_][1]!=0: - - gpu_value = total_resources[index_][1] - cores_ = new_action['variables'][element[0]-1][0] - hardware_ = new_action['variables'][element[0]-1][4] - cores_['_cores'] = gpu_value - hardware_['_hardware'] = "GPU" - total_combinations.append(new_action) - new_action=None - - if fpga_solutions!=None: - for e in fpga_solutions: - if e == element: - idx = original_groupings.index(element) - - new_action = create_input_utility(ast.literal_eval(original_groupings[idx])) - - - for element in combination: - if len(element) ==1: - index_ = element[0]-1 - if total_resources[index_][2]!=0: - fpga_value = total_resources[index_][1] - cores_ = new_action['variables'][element[0]-1][0] - hardware_ = new_action['variables'][element[0]-1][4] - cores_['_cores'] = fpga_value - hardware_['_hardware'] = "FPGA" - total_combinations.append(new_action) - - for solution in solutions: - initial_actions.append(create_input_utility(solution)[1]) - - total_actions = initial_actions+total_combinations - - for i in range(len(total_actions)): - code_actions.append(i) - - return total_actions, code_actions - - - def check_uniquenss(List): - result = all(element == 1 for element in List) - if (result): - res_ = True - else: - res_ = False - - return res_ - - def filter_dfs(visited_list): - new_x = [] - added_x = [] - combinations_all_len=[] - differences_=[] - indexes=[] - final_workflow_rule = [] - rules=[] - ff_rules=[] - - for element in visited_list: - if len(element)>1: - for e_ in element: - added_x.append(e_+1) - new_x.append(added_x) - added_x=[] - else: - pass + for element in total_resources: + list_to_tuple.append(list(element)) + - for j in new_x: - len_ = len(j) - for i in range(2,len_): - lst_ = list(combinations(j, i)) - for element in lst_: - combinations_all_len.append(list(element)) + for element in sol_1: + for idx,e in enumerate(element): + cpu.append(list_to_tuple[e-1][0]) + ram.append(list_to_tuple[e-1][3]) + + for i in range(len(element)): - for element in combinations_all_len: - differences_.append(list(diff(element))) - - for idx,e in enumerate(differences_): - if len(e)==1 and e[0]==1: - indexes.append(idx) - elif len(e)>1 and check_uniquenss(e) == True: - indexes.append(idx) - for element in indexes: - final_workflow_rule.append(combinations_all_len[element]) - for element in new_x: - rules.append(str(element)) - for element in final_workflow_rule: - rules.append(str(element)) - mylist = list( dict.fromkeys(rules)) - for element in mylist: - ff_rules.append(ast.literal_eval(element)) - - return ff_rules + r_f_list.append({'_cores':max(cpu)}) + r_f_list.append({'_mem': max(ram)}) + r_f_list.append({'_instances': int(list(list(app.data['Application_components'][i]['Horizontal scaling requirements'][0].values())[0][1].values())[0])}) + r_f_list.append({'_variant': "[]"}) + r_f_list.append({'_hardware':'CPU'}) + + cut_.append(r_f_list) + r_f_list=[] + cpu, ram = [], [] + + return total_resources, {'target': 'utility', 'sender_id': get_random_ug_id(), 'combination': sol_1, 'variables': cut_} + + def hardware_solutions(solutions): + keep_resources_gpu=[] + keep_resources_fpga=[] + for element in solutions: + total_resources= create_input_utility(element)[0] + for e in element: + if len(e) ==1: + if 
total_resources[e[0]-1][1] !=0: + keep_resources_gpu.append(element) + for e in element: + if len(e) ==1: + if total_resources[e[0]-1][2] !=0: + keep_resources_fpga.append(element) + return keep_resources_gpu, keep_resources_fpga + - class Listener_(stomp.ConnectionListener): + def sorted_list(list_solutions): + list_to_be_changed=[] + solutions=[] + final_solutions=[] + for idx,element in enumerate(list_solutions): + for e in element: + e.sort() + list_to_be_changed.append(e) + solutions.append(list_to_be_changed) + list_to_be_changed=[] + + for idx, element in enumerate(solutions): + x = sorted(element, key=lambda x: x[0]) + final_solutions.append(x) - def __init__(self): - self.message_list=[] + return final_solutions - def on_error(self, headers, message): - print('received an error "%s"' % message) - def on_message(self, headers, message): - self.message_list.append(message) + def create_map_actions(solutions): + solutions_string_type=[] + solution_number=[] + for idx,element in enumerate(solutions): - def amq_send_receive(data_): - connected = False - while not connected: - try: - hosts = [(activemq_hostname, activemq_port)] - conn = stomp.Connection(host_and_ports=hosts) - listener_ = Listener_() - conn.set_listener('', listener_) - conn.connect(activemq_username, activemq_password, wait=True) - connected = True - except Exception as e: - print("Connection failed, process will retry in 5 seconds") - time.sleep(5) - # Register a subscriber with ActiveMQ. This tells ActiveMQ to send - # all messages received on the topic 'topic-1' to this listener - try: - conn.subscribe(destination=amq_topic_to_send, ack='auto') - time.sleep(1) - conn.send(body=json.dumps(data_), destination=amq_topic_to_send) - time.sleep(1) - conn.disconnect() - conn.connect(activemq_username, activemq_password, wait=True) - conn.subscribe(destination=amq_topic_utility_receive, ack='auto') - time.sleep(5) - while True: - for element in listener_.message_list: - e_ = json.loads(element) - if e_['sender_id'] == data_['sender_id']: - result = e_["utility"] - conn.disconnect() - break - else: - continue - except: - print("Could not connect to the ACTIVEMQ Server") - result = None - - return result - - #function to assign a unique id to each request in utility generator - def get_random_ug_id(): - # choose from all lowercase letter - characters = string.ascii_letters + string.digits - password = ''.join(random.choice(characters) for i in range(8)) - return password - - #function to retrive the data from the .json file produced by the WF Analyzer - def export_data(): - ''' - This function is used to acquire the data fromcd Pro - the json file - Returns: the data/ the content - ''' - #final_path = os.path.join(os.getcwd(), file_) - with open(filename_, 'r+') as f: - data = json.load(f) - return data - - - #function to create all the possible paths from the application's workflow - visitedList = [[]] - def depthFirst(graph, currentVertex, visited): - visited.append(currentVertex) - for vertex in graph[currentVertex]: - if vertex not in visited: - depthFirst(graph, vertex, visited.copy()) - visitedList.append(visited) - return visitedList - - - #function to split the relative paths to smaller ones - def split_list(alist, wanted_parts=1): - length = len(alist) - return [ alist[i*length // wanted_parts: (i+1)*length // wanted_parts] - for i in range(wanted_parts) ] - - #function for supporting to filter combinations based on the combinations derived from the application's workflow - def intersection(lst1, lst2): - return 
list(set(lst1) & set(lst2)) - - #function to create all the possible combinations in lists based the nummber of elements in a list - def combination_per_len(components,r): - combinations_ = list(combinations(components, r)) - for idx, element in enumerate(combinations_): - combinations_[idx] = list(element) - return combinations_ - - - - ############################################## - ''' THE ENTIRE FUNCTIONALITY FOR COMMUNICATION BETWEEN CG AND UTILITY GENERATOR - - reads the json file from the WF Analyzer and exploits it - - create the app workflow graph and creates the filter for combinations - - usage of dlx algorithms to find the correct combinations - - creates the resources of each combinations and produces the input for th UG''' - - class Application_model: - def __init__(self): - self.data = export_data() - self.application_components = self.data['Number_of_components'] # i.e. 2 if the App has two Components - self.app_list = [i+1 for i in range(self.application_components)] # [1,2,...,n] - self.app_worfklow = self.data['Application_graph'] #to retrieve the application's flow - self.dict_cpus = {} - self.dict_ram = {} - self.dict_hardware={} - self.dict_variant={} - self.dict_instances={} - self.resources={} - self.final_resources=[] #[{'Component': 1, 'CORES': 1, 'RAM': 8100}, {'Component': 2, 'CORES': 1, 'RAM': 1024}] - self.solutions= [] #[[[1], [2]], [[1, 2]]] all the combinations - self.resource_combination = None - self.str_actions = None #['[[1], [2]]', '[[1, 2]]'] - self.workflow_rules=None - self.no_of_actions = None - - def transform_workflow(self): - new_dict,dict_ = {}, {} - component_name_list,list_with_keys = [], [] - for idx,item in enumerate(self.data['Application_graph']): - new_dict[list(item.keys())[0]] = list(item.values())[0] - - for idx in range(self.data['Number_of_components']): - component_name_list.append(self.data['Application_components'][idx]['name']) - original_values = list(new_dict.values()) - for element in original_values: - for id_, each_element in enumerate(element): - for idx_, e in enumerate(component_name_list): - if each_element == e: - element[id_] = idx_ - for i in range(0,len(original_values)): - list_with_keys.append(str(i)) - for idx,element in enumerate(component_name_list): - dict_.update({idx: original_values[idx]}) - - return dict_ + solution_number.append(idx) + solutions_string_type.append(str(element)) - def set_actions(self): - self.get_workflow() - self.create_combinations() - self.get_combinations() - self.no_of_actions = len(self.solutions) + return solution_number, solutions_string_type - def get_default_conf(self): - return [[i+1] for i in range(self.application_components)] + def create_str_groupings(additional_resources): + for idx, element in enumerate(additional_resources): + additional_resources[idx] = str(element) + + return additional_resources + + def remove_duplicates(x): + return list(dict.fromkeys(x)) + + + def MAP_ACTIONS_RESOURCES(solutions): + initial_actions = [] + code_actions=[] + cpu_combinations = [] + total_combinations= [] + for idx, element in enumerate(solutions): + total_resources = create_input_utility(element)[0] + combination_input_utility = create_input_utility(element)[1] + cpu_combinations.append((idx, combination_input_utility)) + gpu_solutions, fpga_solutions = hardware_solutions(solutions) + solutions = sorted_list(solutions) + gpu_solutions = sorted_list(gpu_solutions) + gpu_solutions = create_str_groupings(gpu_solutions) + gpu_solutions = remove_duplicates(gpu_solutions) + 
action_numbr_list, groupings_list = create_map_actions(solutions) + number_of_actions, original_groupings = create_map_actions(solutions) + for element in original_groupings: + if gpu_solutions!=None: + for e in gpu_solutions: + if e == element: + idx = original_groupings.index(element) + + new_action = create_input_utility(ast.literal_eval(original_groupings[idx]))[1] + combination = new_action['combination'] + + + for element in combination: + if len(element) ==1: + index_ = element[0]-1 + if total_resources[index_][1]!=0: + + gpu_value = total_resources[index_][1] + cores_ = new_action['variables'][element[0]-1][0] + hardware_ = new_action['variables'][element[0]-1][4] + cores_['_cores'] = gpu_value + hardware_['_hardware'] = "GPU" + total_combinations.append(new_action) + new_action=None + + if fpga_solutions!=None: + for e in fpga_solutions: + if e == element: + idx = original_groupings.index(element) + + new_action = create_input_utility(ast.literal_eval(original_groupings[idx])) + + + for element in combination: + if len(element) ==1: + index_ = element[0]-1 + if total_resources[index_][2]!=0: + fpga_value = total_resources[index_][1] + cores_ = new_action['variables'][element[0]-1][0] + hardware_ = new_action['variables'][element[0]-1][4] + cores_['_cores'] = fpga_value + hardware_['_hardware'] = "FPGA" + total_combinations.append(new_action) + + for solution in solutions: + initial_actions.append(create_input_utility(solution)[1]) + + total_actions = initial_actions+total_combinations - def reset_resources(self): - self.dict_cpus={} - self.dict_ram={} - self.dict_hardware={} - self.dict_variant={} + for i in range(len(total_actions)): + code_actions.append(i) - - def get_workflow(self): - graph = {'graph': self.transform_workflow()} - graph = {int(k):[int(i) for i in v] for k,v in graph['graph'].items()} - list_=[] - visitedList = depthFirst(graph, 0, []) - self.workflow_rules = filter_dfs(visited_list=visitedList) - time.sleep(5) - - def combination_list(self): - final_list=[] - self.get_workflow() - final_list.append(self.app_list) - for element in self.app_list: - final_list.append([element]) - - for r in range(2, len(self.app_list)): - combinations_ = combination_per_len(self.app_list, r) + return total_actions, code_actions - for element in combinations_: - for rule in self.workflow_rules: - if intersection(element, rule) == rule and len(element) == len(rule): - final_list.append(element) - return final_list + def check_uniquenss(List): + result = all(element == 1 for element in List) + if (result): + res_ = True + else: + res_ = False + return res_ - def create_combinations(self): - return self.final_resources - - def get_combinations(self): - - id = [i+1 for i in range(self.application_components)] - combinations = self.combination_list() - X = id - Y = makeY_(final_list(X, combinations)) - X_set = prepareX(X, Y) - self.solutions = find_solutions(X_set, Y) - return self.solutions - - def filter_node_candidates(self): - pass + def filter_dfs(visited_list): + new_x = [] + added_x = [] + combinations_all_len=[] + differences_=[] + indexes=[] + final_workflow_rule = [] + rules=[] + ff_rules=[] - def create_single_resources(self, i, resource): - mid_cores_list,mid_mem_list,f_list,list_with_min=[],[],[],[] - for element in self.solutions[i]: + for element in visited_list: if len(element)>1: - for e in element: - idx= e-1 - mid_cores_list.append(self.final_resources[idx][resource]) - f_list.append(mid_cores_list) - mid_cores_list=[] + for e_ in element: + added_x.append(e_+1) + 
new_x.append(added_x) + added_x=[] else: - idx=element[0]-1 + pass - mid_cores_list.append(self.final_resources[idx][resource]) - f_list.append(mid_cores_list) - mid_cores_list=[] - for element in f_list: + for j in new_x: + len_ = len(j) + for i in range(2,len_): + lst_ = list(combinations(j, i)) + for element in lst_: + combinations_all_len.append(list(element)) - min_=0 - if len(element)>1: - min_=min(element) - for i in range(len(element)): - list_with_min.append(min_) - - else: - list_with_min.append(element[0]) - - return list_with_min - - - ''' RL CUSTOM ENVIRONMENT - - ACTIONS: {"0": [[[1,2]], "1": [[1], [2]]]} (i.e. in an app with 2 components - - OBSERVATION SPACE: 1D, TAKES THE UTILITY VALUES FOR EACH COMBINATION - - INITIAL ACTION :0 - ''' - - class Application_Env(Env): - def __init__(self): - self.application=Application_model() - self.action_space = Discrete(actions) # number of possible actions - self.observation_space = Box(low = np.array([11]), high = np.array([200]), dtype=np.int64) - self.utility_state = 0 - #self.utility_interactor = UtilityInteractor() - self.action_masked = [i for i in range(len(self.application.get_combinations()))] - self.original_actions = [i for i in self.application.get_combinations()] - self.initial_action = self.application.get_default_conf() - self.masked_initial_action = [len(i) for i in self.original_actions].index(max([len(i) for i in self.original_actions])) - self.list_of_inputs= [] - self.metrics_to_predict = None - - def prepare_inputs(self): - self.application.get_workflow() - self.application.create_combinations() - self.application.get_combinations() - combin_ = MAP_ACTIONS_RESOURCES(self.application.solutions)[0] - self.list_of_actions=MAP_ACTIONS_RESOURCES(self.application.solutions)[1] - self.list_of_inputs = combin_ - - - def get_performance(self, action): - data = self.list_of_inputs[action] - #return data,random.uniform(0.1, 0.9) - print("Data sent -> ",data) - return self.utility_interactor.getPerformance(data) - #return random.uniform(0.1, 0.9) - """ - while True: - utility_value = amq_topic_utility_receive(data, action) - if utility_value != None: - break - else: - continue - return utility_value""" - - def step(self, action): + for element in combinations_all_len: + differences_.append(list(diff(element))) + + for idx,e in enumerate(differences_): + if len(e)==1 and e[0]==1: + indexes.append(idx) + elif len(e)>1 and check_uniquenss(e) == True: + indexes.append(idx) + for element in indexes: + final_workflow_rule.append(combinations_all_len[element]) + for element in new_x: + rules.append(str(element)) + for element in final_workflow_rule: + rules.append(str(element)) + mylist = list( dict.fromkeys(rules)) + for element in mylist: + ff_rules.append(ast.literal_eval(element)) + + return ff_rules - print('ACTION:{}'.format(self.list_of_actions[action])) - input_ = self.get_performance(action) - print(input_) - ''' - JD only to check if it is working - in this part the ulitity should work instead of a random or the - simulated application + + #function to assign a unique id to each request in utility generator + def get_random_ug_id(): + # choose from all lowercase letter + characters = string.ascii_letters + string.digits + password = ''.join(random.choice(characters) for i in range(8)) + return password + + #function to retrive the data from the .json file produced by the WF Analyzer + def export_data(): ''' - #input_, utility = self.get_performance(action) - #util = UtilitySimulator(input_) - 
#util.generateLoad(lower_bound=11, upper_bound=200) - #load_ = util.load - #util.computePerformance() - #application_performance = util.getPerfomance() - application_performance = random.randint(1,300) - scale_app_performance = (application_performance-0.05)/(300-0.05) - - if scale_app_performance<0: - scale_app_performance = 0 - else: - pass - print('performance based on utility: {}'.format(scale_app_performance)) - if scale_app_performance>=0.7: - reward=1 - done=True - else: - reward = -1 - done=True - info={} - self.state = random.uniform(0.1, 0.9) + This function is used to acquire the data fromcd Pro + the json file + Returns: the data/ the content ''' - if self.state>200: - self.state=200 - elif self.state<11: - self.state=11 - else: - pass - ''' - return self.state, reward, done, info, self.state - - def reset(self): - self.state= 0 - self.initial_action = self.action_space.sample() - self.initial_mask_action= [len(i) for i in self.original_actions].index(max([len(i) for i in self.original_actions])) - return self.state + #final_path = os.path.join(os.getcwd(), file_) + with open(filename_, 'r+') as f: + data = json.load(f) + return data + + + #function to create all the possible paths from the application's workflow + visitedList = [[]] + def depthFirst(graph, currentVertex, visited): + visited.append(currentVertex) + for vertex in graph[currentVertex]: + if vertex not in visited: + depthFirst(graph, vertex, visited.copy()) + visitedList.append(visited) + return visitedList + + + #function to split the relative paths to smaller ones + def split_list(alist, wanted_parts=1): + length = len(alist) + return [ alist[i*length // wanted_parts: (i+1)*length // wanted_parts] + for i in range(wanted_parts) ] + + #function for supporting to filter combinations based on the combinations derived from the application's workflow + def intersection(lst1, lst2): + return list(set(lst1) & set(lst2)) + + #function to create all the possible combinations in lists based the nummber of elements in a list + def combination_per_len(components,r): + combinations_ = list(combinations(components, r)) + for idx, element in enumerate(combinations_): + combinations_[idx] = list(element) + return combinations_ + + + + ############################################## + ''' THE ENTIRE FUNCTIONALITY FOR COMMUNICATION BETWEEN CG AND UTILITY GENERATOR + - reads the json file from the WF Analyzer and exploits it + - create the app workflow graph and creates the filter for combinations + - usage of dlx algorithms to find the correct combinations + - creates the resources of each combinations and produces the input for th UG''' + + class Application_model: + def __init__(self): + self.data = export_data() + self.application_components = self.data['Number_of_components'] # i.e. 
2 if the App has two Components + self.app_list = [i+1 for i in range(self.application_components)] # [1,2,...,n] + self.app_worfklow = self.data['Application_graph'] #to retrieve the application's flow + self.dict_cpus = {} + self.dict_ram = {} + self.dict_hardware={} + self.dict_variant={} + self.dict_instances={} + self.resources={} + self.final_resources=[] #[{'Component': 1, 'CORES': 1, 'RAM': 8100}, {'Component': 2, 'CORES': 1, 'RAM': 1024}] + self.solutions= [] #[[[1], [2]], [[1, 2]]] all the combinations + self.resource_combination = None + self.str_actions = None #['[[1], [2]]', '[[1, 2]]'] + self.workflow_rules=None + self.no_of_actions = None + + def transform_workflow(self): + new_dict,dict_ = {}, {} + component_name_list,list_with_keys = [], [] + for idx,item in enumerate(self.data['Application_graph']): + new_dict[list(item.keys())[0]] = list(item.values())[0] + + for idx in range(self.data['Number_of_components']): + component_name_list.append(self.data['Application_components'][idx]['name']) + original_values = list(new_dict.values()) + for element in original_values: + for id_, each_element in enumerate(element): + for idx_, e in enumerate(component_name_list): + if each_element == e: + element[id_] = idx_ + for i in range(0,len(original_values)): + list_with_keys.append(str(i)) + for idx,element in enumerate(component_name_list): + dict_.update({idx: original_values[idx]}) + + return dict_ + def set_actions(self): + self.get_workflow() + self.create_combinations() + self.get_combinations() + self.no_of_actions = len(self.solutions) - def discounted_cumulative_sums(x, discount): - # Discounted cumulative sums of vectors for computing rewards-to-go and advantage estimates - return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1] + def get_default_conf(self): + return [[i+1] for i in range(self.application_components)] + def reset_resources(self): + self.dict_cpus={} + self.dict_ram={} + self.dict_hardware={} + self.dict_variant={} - class Buffer: - # Buffer for storing trajectories - def __init__(self, observation_dimensions, size, gamma=0.99, lam=0.95): - # Buffer initialization - self.observation_buffer = np.zeros( - (size, observation_dimensions), dtype=np.float32 - ) - self.action_buffer = np.zeros(size, dtype=np.int32) - self.advantage_buffer = np.zeros(size, dtype=np.float32) - self.reward_buffer = np.zeros(size, dtype=np.float32) - self.return_buffer = np.zeros(size, dtype=np.float32) - self.value_buffer = np.zeros(size, dtype=np.float32) - self.logprobability_buffer = np.zeros(size, dtype=np.float32) - self.gamma, self.lam = gamma, lam - self.pointer, self.trajectory_start_index = 0, 0 - - def store(self, observation, action, reward, value, logprobability): - # Append one step of agent-environment interaction - self.observation_buffer[self.pointer] = observation - self.action_buffer[self.pointer] = action - self.reward_buffer[self.pointer] = reward - self.value_buffer[self.pointer] = value - self.logprobability_buffer[self.pointer] = logprobability - self.pointer += 1 - - def finish_trajectory(self, last_value=0): - # Finish the trajectory by computing advantage estimates and rewards-to-go - path_slice = slice(self.trajectory_start_index, self.pointer) - rewards = np.append(self.reward_buffer[path_slice], last_value) - values = np.append(self.value_buffer[path_slice], last_value) - - deltas = rewards[:-1] + self.gamma * values[1:] - values[:-1] - - self.advantage_buffer[path_slice] = discounted_cumulative_sums( - deltas, self.gamma * self.lam 
- ) - self.return_buffer[path_slice] = discounted_cumulative_sums( - rewards, self.gamma - )[:-1] - - self.trajectory_start_index = self.pointer - - def get(self): - # Get all data of the buffer and normalize the advantages - self.pointer, self.trajectory_start_index = 0, 0 - advantage_mean, advantage_std = ( - np.mean(self.advantage_buffer), - np.std(self.advantage_buffer), - ) - self.advantage_buffer = (self.advantage_buffer - advantage_mean) / advantage_std - return ( - self.observation_buffer, - self.action_buffer, - self.advantage_buffer, - self.return_buffer, - self.logprobability_buffer, - ) + + def get_workflow(self): + graph = {'graph': self.transform_workflow()} + graph = {int(k):[int(i) for i in v] for k,v in graph['graph'].items()} + list_=[] + visitedList = depthFirst(graph, 0, []) + self.workflow_rules = filter_dfs(visited_list=visitedList) + time.sleep(5) + def combination_list(self): + final_list=[] + self.get_workflow() + final_list.append(self.app_list) + for element in self.app_list: + final_list.append([element]) + + for r in range(2, len(self.app_list)): + combinations_ = combination_per_len(self.app_list, r) - def mlp(x, sizes, activation=tf.tanh, output_activation=None): - # Build a feedforward neural network - for size in sizes[:-1]: - x = layers.Dense(units=size, activation=activation)(x) - return layers.Dense(units=sizes[-1], activation=output_activation)(x) + for element in combinations_: + for rule in self.workflow_rules: + if intersection(element, rule) == rule and len(element) == len(rule): + final_list.append(element) + return final_list - def logprobabilities(logits, a): - # Compute the log-probabilities of taking actions a by using the logits (i.e. the output of the actor) - logprobabilities_all = tf.nn.log_softmax(logits) - logprobability = tf.reduce_sum( - tf.one_hot(a, num_actions) * logprobabilities_all, axis=1 - ) - return logprobability + def create_combinations(self): + return self.final_resources + + def get_combinations(self): + + id = [i+1 for i in range(self.application_components)] + combinations = self.combination_list() + X = id + Y = makeY_(final_list(X, combinations)) + X_set = prepareX(X, Y) + self.solutions = find_solutions(X_set, Y) + return self.solutions + + def filter_node_candidates(self): + pass - # Sample action from actor - @tf.function - def sample_action(observation): - logits = actor(observation) - action = tf.squeeze(tf.random.categorical(logits, 1), axis=1) - return logits, action + def create_single_resources(self, i, resource): + mid_cores_list,mid_mem_list,f_list,list_with_min=[],[],[],[] + for element in self.solutions[i]: + if len(element)>1: + for e in element: + idx= e-1 + mid_cores_list.append(self.final_resources[idx][resource]) + f_list.append(mid_cores_list) + mid_cores_list=[] + else: + idx=element[0]-1 + mid_cores_list.append(self.final_resources[idx][resource]) + f_list.append(mid_cores_list) + mid_cores_list=[] + for element in f_list: + + min_=0 + if len(element)>1: + min_=min(element) + for i in range(len(element)): + list_with_min.append(min_) + + else: + list_with_min.append(element[0]) - # Train the policy by maxizing the PPO-Clip objective - @tf.function - def train_policy( - observation_buffer, action_buffer, logprobability_buffer, advantage_buffer - ): + return list_with_min - with tf.GradientTape() as tape: # Record operations for automatic differentiation. 
- ratio = tf.exp( - logprobabilities(actor(observation_buffer), action_buffer) - - logprobability_buffer - ) - min_advantage = tf.where( - advantage_buffer > 0, - (1 + clip_ratio) * advantage_buffer, - (1 - clip_ratio) * advantage_buffer, - ) - policy_loss = -tf.reduce_mean( - tf.minimum(ratio * advantage_buffer, min_advantage) - ) - policy_grads = tape.gradient(policy_loss, actor.trainable_variables) - policy_optimizer.apply_gradients(zip(policy_grads, actor.trainable_variables)) + ''' RL CUSTOM ENVIRONMENT + - ACTIONS: {"0": [[[1,2]], "1": [[1], [2]]]} (i.e. in an app with 2 components + - OBSERVATION SPACE: 1D, TAKES THE UTILITY VALUES FOR EACH COMBINATION + - INITIAL ACTION :0 + ''' - kl = tf.reduce_mean( - logprobability_buffer - - logprobabilities(actor(observation_buffer), action_buffer) - ) - kl = tf.reduce_sum(kl) - return kl - - - # Train the value function by regression on mean-squared error - @tf.function - def train_value_function(observation_buffer, return_buffer): - with tf.GradientTape() as tape: # Record operations for automatic differentiation. - value_loss = tf.reduce_mean((return_buffer - critic(observation_buffer)) ** 2) - value_grads = tape.gradient(value_loss, critic.trainable_variables) - value_optimizer.apply_gradients(zip(value_grads, critic.trainable_variables)) - - - # Hyperparameters of the PPO algorithm - - - - - # Initialize the environment and get the dimensionality of the - # observation space and the number of possible actions - #filename_ = 'wf.json' - combinations_ = [] - filename_ = file_ - app= Application_model() - app.set_actions() - x = app.solutions - utilities, code_actions = MAP_ACTIONS_RESOURCES(x) - global actions - actions = len(code_actions) - print('Initializing actions......') - - steps_per_epoch = 30*actions - epoch = 20*actions - gamma = 0.99 - clip_ratio = 0.2 - policy_learning_rate = 3e-4 - value_function_learning_rate = 1e-3 - train_policy_iterations = 80 - train_value_iterations = 80 - lam = 0.97 - target_kl = 0.01 - hidden_sizes = (64, 64) - - # True if you want to render the environment - render = False - time.sleep(2) - print('No of possible grouping combinations: {} including all hardware options'.format(actions)) - time.sleep(1) - for utility in utilities: - combinations_.append(utility['combination']) - print('Combinations are: {}'.format(combinations_)) - env = Application_Env() - env.prepare_inputs() - - observation_dimensions = env.observation_space.shape[0] - num_actions = env.action_space.n - list_of_scores=[] - list_of_groupings=[] - - - # Initialize the buffer - buffer = Buffer(observation_dimensions, steps_per_epoch) - - # Initialize the actor and the critic as keras models - observation_input = keras.Input(shape=(observation_dimensions,), dtype=tf.float32) - logits = mlp(observation_input, list(hidden_sizes) + [num_actions], tf.tanh, None) - actor = keras.Model(inputs=observation_input, outputs=logits) - value = tf.squeeze( - mlp(observation_input, list(hidden_sizes) + [1], tf.tanh, None), axis=1 - ) - critic = keras.Model(inputs=observation_input, outputs=value) - - # Initialize the policy and the value function optimizers - policy_optimizer = tf.keras.optimizers.Adam(learning_rate=policy_learning_rate) - value_optimizer = tf.keras.optimizers.Adam(learning_rate=value_function_learning_rate) - - # Initialize the observation, episode return and episode length - observation, episode_return, episode_length,observation = env.reset(), 0, 0, env.reset() - - - list_sum_legth=[] - # Iterate over the number of epochs - for e in 
range(0,epoch): - #print('EPOCH: {}'.format(epoch)) - #print('epoch {}'.format(epoch)) - # Initialize the sum of the returns, lengths and number of episodes for each epoch - sum_return = 0 - sum_length = 0 - num_episodes = 0 - list_episodes=[] - + class Application_Env(Env): + def __init__(self): + self.application=Application_model() + self.action_space = Discrete(actions) # number of possible actions + self.observation_space = Box(low = np.array([11]), high = np.array([200]), dtype=np.int64) + self.utility_state = 0 + self.utility_interactor = UtilityInteractor() + self.action_masked = [i for i in range(len(self.application.get_combinations()))] + self.original_actions = [i for i in self.application.get_combinations()] + self.initial_action = self.application.get_default_conf() + self.masked_initial_action = [len(i) for i in self.original_actions].index(max([len(i) for i in self.original_actions])) + self.list_of_inputs= [] + self.metrics_to_predict = None + + def prepare_inputs(self): + self.application.get_workflow() + self.application.create_combinations() + self.application.get_combinations() + combin_ = MAP_ACTIONS_RESOURCES(self.application.solutions)[0] + self.list_of_actions=MAP_ACTIONS_RESOURCES(self.application.solutions)[1] + self.list_of_inputs = combin_ + + + def get_performance(self, action): + data = self.list_of_inputs[action] + data['sender_id'] = get_random_ug_id() + print("Data sent -> ",data) + return self.utility_interactor.getPerformance(data) + #return random.uniform(0.1, 0.9) + - # Iterate over the steps of each epoch - for t in range(steps_per_epoch): - #print('step per epoch {}/{}'.format(t, steps_per_epoch)) - #print(t) - if render: - env.render() + def step(self, action): - - # Get the logits, action, and take one step in the environment - observation = np.array([observation]) - observation=observation.reshape(1, -1) - logits, action = sample_action(observation) - observation_new, reward, done, _,observation_new = env.step(action[0].numpy()) - episode_return += reward - episode_length += 1 - - # Get the value and log-probability of the action - value_t = critic(observation) - logprobability_t = logprobabilities(logits, action) - - # Store obs, act, rew, v_t, logp_pi_t - buffer.store(observation, action, reward, value_t, logprobability_t) - + print('ACTION:{}'.format(self.list_of_actions[action])) + data_, utility_ = self.get_performance(action) + print('performance based on utility: {}'.format(utility_)) - # Update the observation - observation = observation_new - - # Finish trajectory if reached to a terminal state - terminal = done - if terminal or (t == steps_per_epoch - 1): - list_of_scores.append((combinations_[action.numpy()[0]],observation,action.numpy()[0])) - last_value = 0 if done else critic(np.array([[observation]])) - buffer.finish_trajectory(last_value) - sum_return += episode_return - sum_length += episode_length - num_episodes += 1 - observation, episode_return, episode_length = env.reset(), 0, 0 - - # Get values from the buffer - ( - observation_buffer, - action_buffer, - advantage_buffer, - return_buffer, - logprobability_buffer, - ) = buffer.get() - - # Update the policy and implement early stopping using KL divergence - for _ in range(train_policy_iterations): - kl = train_policy( - observation_buffer, action_buffer, logprobability_buffer, advantage_buffer + if utility_<0 or utility_==None: + reward = -100 + elif utility_>=0.7: + reward=100 + done=True + elif utility_>=0.5: + reward=1 + done=False + else: + reward = -1 + done=True + info={} 
+ self.state = utility_ + + return self.state, reward, done, info, self.state + + def reset(self): + self.state= 0 + self.initial_action = self.action_space.sample() + self.initial_mask_action= [len(i) for i in self.original_actions].index(max([len(i) for i in self.original_actions])) + return self.state + + + def discounted_cumulative_sums(x, discount): + # Discounted cumulative sums of vectors for computing rewards-to-go and advantage estimates + return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1] + + + class Buffer: + # Buffer for storing trajectories + def __init__(self, observation_dimensions, size, gamma=0.99, lam=0.95): + # Buffer initialization + self.observation_buffer = np.zeros( + (size, observation_dimensions), dtype=np.float32 + ) + self.action_buffer = np.zeros(size, dtype=np.int32) + self.advantage_buffer = np.zeros(size, dtype=np.float32) + self.reward_buffer = np.zeros(size, dtype=np.float32) + self.return_buffer = np.zeros(size, dtype=np.float32) + self.value_buffer = np.zeros(size, dtype=np.float32) + self.logprobability_buffer = np.zeros(size, dtype=np.float32) + self.gamma, self.lam = gamma, lam + self.pointer, self.trajectory_start_index = 0, 0 + + def store(self, observation, action, reward, value, logprobability): + # Append one step of agent-environment interaction + self.observation_buffer[self.pointer] = observation + self.action_buffer[self.pointer] = action + self.reward_buffer[self.pointer] = reward + self.value_buffer[self.pointer] = value + self.logprobability_buffer[self.pointer] = logprobability + self.pointer += 1 + + def finish_trajectory(self, last_value=0): + # Finish the trajectory by computing advantage estimates and rewards-to-go + path_slice = slice(self.trajectory_start_index, self.pointer) + rewards = np.append(self.reward_buffer[path_slice], last_value) + values = np.append(self.value_buffer[path_slice], last_value) + + deltas = rewards[:-1] + self.gamma * values[1:] - values[:-1] + + self.advantage_buffer[path_slice] = discounted_cumulative_sums( + deltas, self.gamma * self.lam + ) + self.return_buffer[path_slice] = discounted_cumulative_sums( + rewards, self.gamma + )[:-1] + + self.trajectory_start_index = self.pointer + + def get(self): + # Get all data of the buffer and normalize the advantages + self.pointer, self.trajectory_start_index = 0, 0 + advantage_mean, advantage_std = ( + np.mean(self.advantage_buffer), + np.std(self.advantage_buffer), + ) + self.advantage_buffer = (self.advantage_buffer - advantage_mean) / advantage_std + return ( + self.observation_buffer, + self.action_buffer, + self.advantage_buffer, + self.return_buffer, + self.logprobability_buffer, + ) + + + def mlp(x, sizes, activation=tf.tanh, output_activation=None): + # Build a feedforward neural network + for size in sizes[:-1]: + x = layers.Dense(units=size, activation=activation)(x) + return layers.Dense(units=sizes[-1], activation=output_activation)(x) + + + def logprobabilities(logits, a): + # Compute the log-probabilities of taking actions a by using the logits (i.e. 
the output of the actor) + logprobabilities_all = tf.nn.log_softmax(logits) + logprobability = tf.reduce_sum( + tf.one_hot(a, num_actions) * logprobabilities_all, axis=1 ) - if kl > 1.5 * target_kl: - # Early Stopping - break + return logprobability + + + # Sample action from actor + @tf.function + def sample_action(observation): + logits = actor(observation) + action = tf.squeeze(tf.random.categorical(logits, 1), axis=1) + return logits, action + + + # Train the policy by maxizing the PPO-Clip objective + @tf.function + def train_policy( + observation_buffer, action_buffer, logprobability_buffer, advantage_buffer + ): + + with tf.GradientTape() as tape: # Record operations for automatic differentiation. + ratio = tf.exp( + logprobabilities(actor(observation_buffer), action_buffer) + - logprobability_buffer + ) + min_advantage = tf.where( + advantage_buffer > 0, + (1 + clip_ratio) * advantage_buffer, + (1 - clip_ratio) * advantage_buffer, + ) + + policy_loss = -tf.reduce_mean( + tf.minimum(ratio * advantage_buffer, min_advantage) + ) + policy_grads = tape.gradient(policy_loss, actor.trainable_variables) + policy_optimizer.apply_gradients(zip(policy_grads, actor.trainable_variables)) + + kl = tf.reduce_mean( + logprobability_buffer + - logprobabilities(actor(observation_buffer), action_buffer) + ) + kl = tf.reduce_sum(kl) + return kl + + + # Train the value function by regression on mean-squared error + @tf.function + def train_value_function(observation_buffer, return_buffer): + with tf.GradientTape() as tape: # Record operations for automatic differentiation. + value_loss = tf.reduce_mean((return_buffer - critic(observation_buffer)) ** 2) + value_grads = tape.gradient(value_loss, critic.trainable_variables) + value_optimizer.apply_gradients(zip(value_grads, critic.trainable_variables)) + + + # Hyperparameters of the PPO algorithm + + + + + # Initialize the environment and get the dimensionality of the + # observation space and the number of possible actions + #filename_ = 'wf.json' + combinations_ = [] + filename_ = file_ + app= Application_model() + app.set_actions() + x = app.solutions + utilities, code_actions = MAP_ACTIONS_RESOURCES(x) + time.sleep(5) + global actions + actions = len(code_actions) + print('Initializing actions......') + + steps_per_epoch = 30*actions + epoch = 20*actions + gamma = 0.99 + clip_ratio = 0.2 + policy_learning_rate = 3e-4 + value_function_learning_rate = 1e-3 + train_policy_iterations = 80 + train_value_iterations = 80 + lam = 0.97 + target_kl = 0.01 + hidden_sizes = (64, 64) + + # True if you want to render the environment + render = False + print('No of possible grouping combinations: {} including all hardware options'.format(actions)) + time.sleep(1) + for utility in utilities: + combinations_.append(utility['combination']) + print('Combinations are: {}'.format(combinations_)) + env = Application_Env() + env.prepare_inputs() + variables=[] + for id, element in enumerate(env.list_of_inputs): + for idx,e in enumerate(element['variables']): + variables.append({'component': idx+1}) + for dict_ in e: + variables.append(dict_) + env.list_of_inputs[id]['variables'] = variables + + variables=[] + + observation_dimensions = env.observation_space.shape[0] + num_actions = env.action_space.n + list_of_scores=[] + list_of_groupings=[] - # Update the value function - for _ in range(train_value_iterations): - train_value_function(observation_buffer, return_buffer) - # Print mean return and length for each epoch + # Initialize the buffer + buffer = 
Buffer(observation_dimensions, steps_per_epoch) - print( - f" Epoch: {e + 1}. Mean Return: {sum_return / num_episodes}. Mean Length: {sum_length / num_episodes}" + # Initialize the actor and the critic as keras models + observation_input = keras.Input(shape=(observation_dimensions,), dtype=tf.float32) + logits = mlp(observation_input, list(hidden_sizes) + [num_actions], tf.tanh, None) + actor = keras.Model(inputs=observation_input, outputs=logits) + value = tf.squeeze( + mlp(observation_input, list(hidden_sizes) + [1], tf.tanh, None), axis=1 ) - list_sum_legth.append(sum_length / num_episodes) - list_of_groupings.append(list_of_scores) - list_of_scores=[] + critic = keras.Model(inputs=observation_input, outputs=value) + + # Initialize the policy and the value function optimizers + policy_optimizer = tf.keras.optimizers.Adam(learning_rate=policy_learning_rate) + value_optimizer = tf.keras.optimizers.Adam(learning_rate=value_function_learning_rate) + + # Initialize the observation, episode return and episode length + observation, episode_return, episode_length,observation = env.reset(), 0, 0, env.reset() + + + list_sum_legth=[] + # Iterate over the number of epochs + for e in range(0,epoch): + #print('EPOCH: {}'.format(epoch)) + #print('epoch {}'.format(epoch)) + # Initialize the sum of the returns, lengths and number of episodes for each epoch + sum_return = 0 + sum_length = 0 + num_episodes = 0 + list_episodes=[] + - global full_filepath, app_id, uri_notification, request_type - #print(list_sum_legth) - actions_=[] + # Iterate over the steps of each epoch + for t in range(steps_per_epoch): + #print('step per epoch {}/{}'.format(t, steps_per_epoch)) + #print(t) + if render: + env.render() - performance_m = [] - for element in list_of_groupings: - actions_.append(element[0][0]) + + # Get the logits, action, and take one step in the environment + observation = np.array([observation]) + observation=observation.reshape(1, -1) + logits, action = sample_action(observation) + observation_new, reward, done, _,observation_new = env.step(action[0].numpy()) + episode_return += reward + episode_length += 1 + + # Get the value and log-probability of the action + value_t = critic(observation) + logprobability_t = logprobabilities(logits, action) + + # Store obs, act, rew, v_t, logp_pi_t + buffer.store(observation, action, reward, value_t, logprobability_t) + + + # Update the observation + observation = observation_new + + # Finish trajectory if reached to a terminal state + terminal = done + if terminal or (t == steps_per_epoch - 1): + list_of_scores.append((combinations_[action.numpy()[0]],observation,action.numpy()[0])) + last_value = 0 if done else critic(np.array([[observation]])) + buffer.finish_trajectory(last_value) + sum_return += episode_return + sum_length += episode_length + num_episodes += 1 + observation, episode_return, episode_length = env.reset(), 0, 0 + + # Get values from the buffer + ( + observation_buffer, + action_buffer, + advantage_buffer, + return_buffer, + logprobability_buffer, + ) = buffer.get() + + # Update the policy and implement early stopping using KL divergence + for _ in range(train_policy_iterations): + kl = train_policy( + observation_buffer, action_buffer, logprobability_buffer, advantage_buffer + ) + if kl > 1.5 * target_kl: + # Early Stopping + break - performance_m.append(element[0][1]) + # Update the value function + for _ in range(train_value_iterations): + train_value_function(observation_buffer, return_buffer) + # Print mean return and length for each epoch - 
max_ = max(performance_m) - print("max: {}".format(max_)) - idx_ = performance_m.index(max_) - grouping = actions_[idx_] - grouping_utility = max_ - print('proposed grouping: {}, calculated utility: {}'.format(grouping, grouping_utility)) - time.sleep(4) - data_to_send_to_ps = {"utility": grouping_utility, "application_id": app_id,"uri_notification": uri_notification, "request_type":request_type, "result": grouping, "json_path": full_filepath} - print(data_to_send_to_ps) + print( + f" Epoch: {e + 1}. Mean Return: {sum_return / num_episodes}. Mean Length: {sum_length / num_episodes}" + ) + list_sum_legth.append(sum_length / num_episodes) + list_of_groupings.append(list_of_scores) + list_of_scores=[] + + global full_filepath, app_id, uri_notification, request_type + #print(list_sum_legth) + actions_=[] + + performance_m = [] + for element in list_of_groupings: + actions_.append(element[0][0]) + + performance_m.append(element[0][1]) + + + max_ = max(performance_m) + print("max: {}".format(max_)) + idx_ = performance_m.index(max_) + grouping = actions_[idx_] + grouping_utility = max_ + print('proposed grouping: {}, calculated utility: {}'.format(set_component_names_from_solutions(filename_,grouping), grouping_utility)) + time.sleep(2) + data_to_send_to_ps = {"utility": grouping_utility, "application_id": app_id,"uri_notification": uri_notification, "request_type":request_type, "result": grouping, "json_path": full_filepath} + print(data_to_send_to_ps) + headers = {"Content-Type":"application/json"} + try: + response = requests.post(url=url_polymorphic_solver,data=json.dumps(data_to_send_to_ps), headers=headers) + print(response) + except Exception as e: + print("Could not reach the polymorphic solver") + + time.sleep(1200) + + +def initial_groupings(full_filepath,app_id,uri_notification,request_type): + initial_combination = [] + initial_utility=0 + with open(full_filepath, 'r+') as f: + data = json.load(f) + Number_components = data['Number_of_components'] + for element in range(1, Number_components+1): + initial_combination.append([element]) + print("Initial grouping {0}".format(set_component_names_from_solutions(full_filepath,initial_combination))) + time.sleep(5) + data_to_send_to_ps = {"utility": initial_utility, "application_id": app_id,"uri_notification": uri_notification, "request_type":request_type, "result": initial_combination, "json_path": full_filepath} + print('data to send to ps: {0}'.format(data_to_send_to_ps)) headers = {"Content-Type":"application/json"} try: response = requests.post(url=url_polymorphic_solver,data=json.dumps(data_to_send_to_ps), headers=headers) print(response) except Exception as e: print("Could not reach the polymorphic solver") - - + app = FastAPI() class RequestWA(BaseModel): @@ -945,31 +910,12 @@ async def grouping(request:RequestWA): full_filepath = config_dir +"/"+ request.filepath request_type = request.request_type uri_notification = request.uri_notification + init_ = Thread(target=initial_groupings, args=(full_filepath,app_id,uri_notification,request_type)) + init_.start() t = Thread(target=groupings, args=(full_filepath,)) t.start() - #solutions_ = groupings(file_=full_filepath) - """ - data_to_send_to_ps = {"utility": solutions_[1], "application_id": app_id,"uri_notification": uri_notification, "request_type":request_type, "result": solutions_[0], "json_path": full_filepath} - headers = {"Content-Type":"application/json"} - try: - response = requests.post(url=url_polymorphic_solver,data=json.dumps(data_to_send_to_ps), headers=headers) - 
print(response) - except Exception as e: - print("Could not reach the polymorphic solver") """ + return {"status": True} if __name__ == "__main__": - uvicorn.run(app,host="0.0.0.0", port=7474) - -''' -actions_=[] -performances=[] - - -print(list_of_groupings) -for element in list_of_groupings: - actions_.append(element[0]) - performances.append(element[1]) - -print(max(performances)) -''' \ No newline at end of file + uvicorn.run(app,host="0.0.0.0", port=7474) \ No newline at end of file diff --git a/polymorphic_solver/src/api.py b/polymorphic_solver/src/api.py index 788b3016..43c0490b 100644 --- a/polymorphic_solver/src/api.py +++ b/polymorphic_solver/src/api.py @@ -26,7 +26,7 @@ camel_converter_url = os.environ.get("CAME_CONVERTER_URL","http://localhost:7676 defaul_initial_camel_name = "initial_camel_model" class ApplicationDataRequest(BaseModel): - utility: str + utility: float json_path: str application_id: str uri_notification: str diff --git a/polymorphic_solver/src/app.py b/polymorphic_solver/src/app.py index 340f88a4..bec60730 100644 --- a/polymorphic_solver/src/app.py +++ b/polymorphic_solver/src/app.py @@ -203,6 +203,8 @@ class PolymorphicSolver(): self.max_optimization_time = 20 self.last_analysis = time.time() self.agent_consumers = {} + self.rl_engine_started = False + self.first_deployment_completed = False self.program = None def newGrouping(self, groups): @@ -238,7 +240,10 @@ class PolymorphicSolver(): self.sendTrainRequestToPerformanceModule() self.updateVirtualApplication() self.createComponents() - self.prepareMultiRLEngine() + if self.archetype_manager.initialAnalysis(): + #self.prepareMultiRLEngine() + self.first_deployment_completed = True + print('First deployment finished') else: print("Metrics to predict not ready, process will enter a loop waiting for the metrics to predict message") timeout = 180 # timeout metrics to predict message @@ -248,7 +253,10 @@ class PolymorphicSolver(): self.sendTrainRequestToPerformanceModule() self.updateVirtualApplication() self.createComponents() - self.prepareMultiRLEngine() + if self.archetype_manager.initialAnalysis(): + #self.prepareMultiRLEngine() + self.first_deployment_completed = True + print('First deployment finished') break else: if time.time() - _start >= timeout: @@ -305,7 +313,6 @@ class PolymorphicSolver(): data["models"] = models self.publisher.setParameters(data, performance_module_topic) self.publisher.send() - print("******************************") def updateVirtualApplication(self): metrics_data = {} @@ -359,6 +366,12 @@ class PolymorphicSolver(): if data['request'] == 'state': return None + if data['request'] == "application_ready": + if self.first_deployment_completed: + if not self.rl_engine_started: + self.prepareMultiRLEngine() + self.rl_engine_started = True + if data["request"] == "stop_rl_engine": print("Stop RL engine message received") self.stopRLEngine() @@ -542,15 +555,16 @@ class PolymorphicSolver(): def createComponents(self): for name, comp in self.virtual_application.items(): index = comp.getIndex() - #min_mem, max_mem, min_cpu, max_cpu,min_gpu, max_gpu, min_fpga, max_fpga, max_instances, hardware_list, variants, metrics = comp.prepareResource() + min_mem, max_mem, min_cpu, max_cpu,min_gpu, max_gpu, min_fpga, max_fpga, min_hpc, max_hpc, min_instances, max_instances, hardware_list, variants, metrics = comp.prepareResource() #self.env.createStates(name, min_mem, max_mem, min_cpu, max_cpu, min_gpu, max_gpu, min_fpga, max_fpga, max_instances, hardware_list, variants, comp.getMetrics()) - variants = 
comp.getVariants() - hardware_list = comp.getHWS() image = comp.getImage() - self.archetype_manager.createArchetypes(name, index, variants, hardware_list,image) + self.archetype_manager.createArchetypes(name, index, variants, hardware_list,image, min_mem, max_mem, min_cpu, max_cpu, min_instances, max_instances, min_gpu, max_gpu, min_fpga, max_fpga, min_hpc, max_hpc) self.archetype_manager.setNumberOfComponents(self.virtual_application.getNumberOfComponents()) def prepareMultiRLEngine(self): + print("RL Engine is starting ...") + #wait to application to be deployed + # environment_factory = functools.partial( self.make_environment ) diff --git a/polymorphic_solver/src/morphemic.py b/polymorphic_solver/src/morphemic.py index 2aa7baef..d5143d57 100644 --- a/polymorphic_solver/src/morphemic.py +++ b/polymorphic_solver/src/morphemic.py @@ -21,7 +21,7 @@ INDEX_CORES = 1 INDEX_MEMORY = 0 class MorphemicArchetype(): - def __init__(self, component_name, index, variant, hw, image): + def __init__(self, component_name, index, variant, hw, image, min_mem, max_mem, min_cpu, max_cpu, min_instances, max_instances, min_gpu, max_gpu, min_fpga, max_fpga, min_hpc, max_hpc): self.variant = variant self.hw = hw self.component_name = component_name @@ -33,6 +33,18 @@ class MorphemicArchetype(): self.avg_performance = 0 self.sum_performance = 0 self.uri_notification = None + self.min_cpu = min_cpu + self.min_gpu = min_gpu + self.min_fpga = min_fpga + self.max_cpu = max_cpu + self.max_gpu = max_gpu + self.min_hpc = min_hpc + self.max_hpc = max_hpc + self.max_fpga = max_fpga + self.max_instances = max_instances + self.min_instances = min_instances + self.min_mem = min_mem + self.max_mem = max_mem self.lowest = [None, None, None, None] self.highest = [None, None, None, None] print('Archetype for component index = {0} Variant = {1}, HW= {2} for component {3} created'.format(self.component_index, self.variant, self.hw, self.component_name)) @@ -59,14 +71,27 @@ class MorphemicArchetype(): return self.key def getComponentName(self): return self.component_name - + def getMinRequirements(self): + return self.min_mem, self.min_cpu, self.min_gpu, self.min_fpga + def getMaxRequirements(self): + return self.max_mem, self.max_cpu, self.max_gpu, self.max_fpga def clean(self): del self.collections[:] self.lowest = [None, None, None, None] self.highest = [None, None, None, None] self.avg_performance = 0 self.sum_performance = 0 - + def setLowestHighestFromRequirements(self): + min_cores, max_cores = None, None + if self.hw == "CPU": + min_cores, max_cores = self.min_cpu, self.max_cpu + elif self.hw == "GPU": + min_cores, max_cores = self.min_gpu, self.max_gpu + else: + min_cores, max_cores = self.min_fpga, self.max_fpga + + self.lowest = [self.min_mem, min_cores, self.min_instances, 0] + self.highest = [self.max_mem, max_cores, self.max_instances, 0] def getVariant(self): return self.variant def getIndexState(self): @@ -147,12 +172,12 @@ class MorphemicArchetypeManager(): for archetype in self.archetypes: archetype.clean() - def createArchetypes(self, component_name, index, variants, hws, image): + def createArchetypes(self, component_name, index, variants, hws, image, min_mem, max_mem, min_cpu, max_cpu, min_instances, max_instances, min_gpu, max_gpu, min_fpga, max_fpga, min_hpc, max_hpc): for variant in variants: for hw in hws: if hw == "FPGA" and variant == "SERVERLESS": continue - archetype = MorphemicArchetype(component_name,index, variant, hw, image) + archetype = MorphemicArchetype(component_name,index, variant, hw, image, 
min_mem, max_mem, min_cpu, max_cpu, min_instances, max_instances, min_gpu, max_gpu, min_fpga, max_fpga, min_hpc, max_hpc) self.archetypes.append(archetype) def getArchetypeByComponentName(self, name, variant, hw): @@ -207,6 +232,27 @@ class MorphemicArchetypeManager(): result += archetype.getAvgPerformance() return result + def initialAnalysis(self): + all_keys = [] + for archetype in self.archetypes: + all_keys.append(archetype.getKey()) + + archetypes = None + for group in combinations(all_keys, self.number_of_components): + if self.isGroupIsCompleted(group): + archetypes = self.getArchetypesByGroup(group) + for archetype in archetypes: + archetype.setLowestHighestFromRequirements() + + filename = self.camel_transformer.archetypesToCamels(archetypes, self.camel_version, self.current_grouping) + if filename: + self.cleanAllArchetypes() + self.camel_version +=1 + if self.importNewCamel(filename): + self.notifyMorphemic(filename) + return True + return False + def analyse(self): print("Analysis started ...") all_keys = [] @@ -260,7 +306,9 @@ class MorphemicArchetypeManager(): } try: response = requests.post(mule_hostname+self.uri_notification, data=json.dumps(data), headers={"Content-Type": "application/json"}) + print('*************Mule**************') print(response.text) + print("*******************************") except Exception as e: print(e) diff --git a/workflow_analyzer/target/SpringRestExample-1.0.0-BUILD-SNAPSHOT.war b/workflow_analyzer/target/SpringRestExample-1.0.0-BUILD-SNAPSHOT.war index 390212c74567343f2fb490bf7fc3b91bd006619d..df7896dc600d88495474dbbc74834a6faf9e6a9c 100644 GIT binary patch delta 12078 zcmZYF2Rv2(A3t!HOJ*{&BRhL5`CP||-?_joSi8WzuC0avCq=`?#zs@}5imfu8b%2r z-IYS&@T=yleKkrX_bCNl$fL?fFqy9y%02?fvac}4t77ykjN_`n?1J%J6(U_Q@vFkS z3nqJ2=ybzGklS6b8$@8J0ouR5_>kYXVS)sx1HeT?!$w0x%?-LMU5!iWhOu519o;bY zt75kscIT>~>wyVjDEbIg8}`6{!!W>sR;zu3iDIEDWcUzF3xN|vy5e&wVcVj+0=NGv%DO8y!G|&cNR^vN$0p4m7&jW_ol^n zG4YIj7ph(_isvs1YAmog5A7?j?`ux5__z&SrcB3^JI^a6-zpW@YdmW5J8wQ}>MY!r zKEg!fSh_$f&_=jre7VQ91}8YRy=M1DpHqr-m2%9fQ%ZJv&0c)9m`q=Ud)%gt6h85| zPQ=%BZFb;0^<6Xpd}DIQg=YevPrrOhm%6vhFj+oy4bw=-H$g{AB#G{U1^<(LI)X$u z`ujH5mv!%KMav(GiHXpvF>gG7Fuy#HqzqG%I$x%$A{ZFBYE_RL6g zeznu-$m@r*cV*q{u9NIx9=NU7Pe{Zo8CcreNG~TF(;I8k4|LIPHcn0io=*CKVbdec zjn#T$JZ0|rrov8ls8?CY4Xd)*>c6(#>m9D{;WE#x?~Qig&$pLcW=``~y2oy0Qe(?2WmfzME_zM#Q@)-1;aM`VPno6LhQlawy zm@)1V5tT&Y%`UW7bIO=?c$X<%RhNFy#i`NlP??2n53zxkz|mreTaw50VOddl bN zZS0}&Qx_e#g~tl1+6-s?-Ikzop-yx@n)P8Q17>5gw?iIO}xs!nJf865tRHHHORrxC2nJhnF z*U(>EapGY$Efmo^#_1ovrwy zedDXwW(D-ssS@lT?y8qme6SsNU7uN%RLkK>s!?!9c$nc7%@Z59Z-qKcdZdH}PxT4( z+x}STXfPk)aS9Yv#-Pfq$;k1D5qHJoPJOWXs+LL04?{V_6cNtFR2EbD>S@JLs6|h} zaMgmapcr%Pi>-H6bM%b7Qn@XRiIc{ecc`o*%T>ju64k#KW9*O`P2!iiZ{I&M)So2YpZG} zc&hz@c$$JctbJl#)H1M}o3q!2bGEQ%19=$8H5GBnzF?HX-4UxwoIwW1KYKfuytj^a z5is57)lU2yO*-Sv34NilU6|Veo6wvbc`#;kH^M*0a7?bWVQ*!)M_Xu)0grHzsFgF^ z9#8E@&TX2=0$ueQs`0_mAq#k^?anCK^Jc!EqzSyua(d@Fc(?T6qpvoc!W@e)HN>kK zR^PUJbhdT5iuN&Xjz``f`^*YQYP{Qii&>yM)KH`?MmQHo9&A>fY7)Cqr4;YFFx30H zNdvK!H`Q!6??;x8MZ2EUk0q@p_-@fDH)WsH4T7yNEhR&(8S;pxZWdCN(`0rmV8D~G8NdF+0B$j-t^y98B2O6kVCKA#AE8O zbHYt;r4w>^hxz6F|JgOPJ+gE;WnDa1bwC!jIS=2)5W?Q^6OnV2SC7I3TiZj_= z$06(_6#U(nAwuP~895Sb&~q`;HaovfnfrFJBrUxtlx(efHcBGS#Nd#yhFH#3+2}QYdaA61>U6w6cHqNl<=lZhiTVNxyNz$%Ip2PA`4INLp>81$k_eX9z<%r*rJM(`$x~64MlB%f|AI zpzZz2r@e1#!+Xo^o6MEtp&}`yZ6WvyZ5m;U?(2 zM#kT0E4ZDi*u-jtN5$cjqk1pLT{X$dl8&W6l08-15jhk-W$b9Y_`|k~KazBzBF<>8 z^_@W7%qv2YL=T>o2~2ClADuQGs!Zo;ZF2Jx7&-3pCKZ0Mi*i#cVq3`}`kYM&gmcZ# ze9eAKQ)RsCsOQk_x5P!njJ4k1IjQQRzW&I}cb^g>8Z{#((`4Agid&W6&NglpY3=@! 
-- GitLab