Merge pull request kubeedge#78 from hsj576/main
Merge of the feature-lifelong-n branch
jaypume authored Jul 25, 2023
2 parents 3f4dff0 + adcaafc commit c024813
Showing 248 changed files with 54,262 additions and 75 deletions.
3 changes: 3 additions & 0 deletions README_ospp.md
@@ -0,0 +1,3 @@
# OSPP
I modified the Sedna source code to implement my algorithm.
Please refer to https://github.com/kubeedge/sedna/pull/378 and https://github.com/nailtu30/sedna/blob/ospp-final/README_ospp.md for more information.
25 changes: 25 additions & 0 deletions core/common/constant.py
@@ -34,21 +34,46 @@ class ParadigmType(Enum):
SINGLE_TASK_LEARNING = "singletasklearning"
INCREMENTAL_LEARNING = "incrementallearning"
MULTIEDGE_INFERENCE = "multiedgeinference"
LIFELONG_LEARNING = "lifelonglearning"


class ModuleType(Enum):
"""
Algorithm module type.
"""
BASEMODEL = "basemodel"

# HEM
HARD_EXAMPLE_MINING = "hard_example_mining"

# STP
TASK_DEFINITION = "task_definition"
TASK_RELATIONSHIP_DISCOVERY = "task_relationship_discovery"
TASK_ALLOCATION = "task_allocation"
TASK_REMODELING = "task_remodeling"
INFERENCE_INTEGRATE = "inference_integrate"

# KM
TASK_UPDATE_DECISION = "task_update_decision"

# UTP
UNSEEN_TASK_ALLOCATION = "unseen_task_allocation"

# UTD
UNSEEN_SAMPLE_RECOGNITION = "unseen_sample_recognition"
UNSEEN_SAMPLE_RE_RECOGNITION = "unseen_sample_re_recognition"


class SystemMetricType(Enum):
"""
System metric type of ianvs.
"""
# pylint: disable=C0103
SAMPLES_TRANSFER_RATIO = "samples_transfer_ratio"
FWT = "FWT"
BWT = "BWT"
Task_Avg_Acc = "Task_Avg_Acc"
Matrix = "Matrix"

class TestObjectType(Enum):
"""
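For reference, a minimal usage sketch of the new enum members (assuming core/common/constant.py is importable as in the tree above); the lookups below simply resolve the config strings that lifelong-learning test configurations use:

```python
# Minimal sketch: resolve module-type / metric strings from a benchmarking
# config to the enum members added in this change.
from core.common.constant import ModuleType, SystemMetricType

module_type = ModuleType("unseen_task_allocation")
print(module_type)                  # ModuleType.UNSEEN_TASK_ALLOCATION

metric = SystemMetricType("samples_transfer_ratio")
print(metric.name, metric.value)    # SAMPLES_TRANSFER_RATIO samples_transfer_ratio
```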
24 changes: 22 additions & 2 deletions core/storymanager/rank/rank.py
Expand Up @@ -21,7 +21,7 @@
import pandas as pd

from core.common import utils
from core.storymanager.visualization import get_visualization_func
from core.storymanager.visualization import get_visualization_func, draw_heatmap_picture


# pylint: disable=R0902
@@ -107,7 +107,10 @@ def _get_all_module_types(cls, test_cases) -> list:
def _get_algorithm_hyperparameters(cls, algorithm):
hps = {}
for module in algorithm.modules.values():
hps.update(**module.hyperparameters)
for name, value in module.hyperparameters.items():
name = f"{module.type}-{name}"
value = str(value)
hps.update({name: value})
return hps

def _get_all_hps_names(self, test_cases) -> list:
@@ -170,6 +173,7 @@ def _get_all(self, test_cases, test_results) -> pd.DataFrame:
return self._sort_all_df(all_df, self._get_all_metric_names(test_results))

def _save_all(self):
# pylint: disable=E1101
all_df = copy.deepcopy(self.all_df)
all_df.index = pd.np.arange(1, len(all_df) + 1)
all_df.to_csv(self.all_rank_file, index_label="rank", encoding="utf-8", sep=" ")
@@ -199,10 +203,21 @@ def _get_selected(self, test_cases, test_results) -> pd.DataFrame:
return selected_df

def _save_selected(self, test_cases, test_results):
# pylint: disable=E1101
selected_df = self._get_selected(test_cases, test_results)
selected_df.index = pd.np.arange(1, len(selected_df) + 1)
selected_df.to_csv(self.selected_rank_file, index_label="rank", encoding="utf-8", sep=" ")

def _draw_pictures(self, test_cases, test_results):
# pylint: disable=E1101
for test_case in test_cases:
out_put = test_case.output_dir
test_result = test_results[test_case.id][0]
matrix = test_result.get('Matrix')
#print(out_put)
for key in matrix.keys():
draw_heatmap_picture(out_put, key, matrix[key])

def _prepare(self, test_cases, test_results, output_dir):
all_metric_names = self._get_all_metric_names(test_results)
all_hps_names = self._get_all_hps_names(test_cases)
@@ -241,6 +256,11 @@ def save(self, test_cases, test_results, output_dir):
if self.save_mode == "selected_only":
self._save_selected(test_cases, test_results)

if self.save_mode == "selected_and_all_and_picture":
self._save_all()
self._save_selected(test_cases, test_results)
self._draw_pictures(test_cases, test_results)

def plot(self):
"""
plot rank according to the visual method, include
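A hedged sketch of the test-result shape that the new `_draw_pictures` step consumes: each test result is expected to carry a `Matrix` entry mapping a name to a square matrix, which `draw_heatmap_picture` then renders. The test-case id and values below are illustrative only; the pictures are produced when the rank's `save_mode` is set to `"selected_and_all_and_picture"`.

```python
# Illustrative structure only; real values come from the lifelong-learning paradigm.
test_results = {
    "test-case-id-001": [{
        "samples_transfer_ratio": 0.25,
        "Matrix": {
            # name -> square matrix (e.g. per-task accuracy over task rounds)
            "accuracy": [[0.90, 0.10],
                         [0.85, 0.92]],
        },
    }],
}
```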
2 changes: 1 addition & 1 deletion core/storymanager/visualization/__init__.py
@@ -13,4 +13,4 @@
# limitations under the License.

# pylint: disable=missing-module-docstring
from .visualization import get_visualization_func
from .visualization import get_visualization_func, draw_heatmap_picture
20 changes: 19 additions & 1 deletion core/storymanager/visualization/visualization.py
@@ -15,7 +15,8 @@
"""Visualization"""

import sys

import os
import matplotlib.pyplot as plt
from prettytable import from_csv


@@ -25,6 +26,23 @@ def print_table(rank_file):
table = from_csv(file)
print(table)

def draw_heatmap_picture(output, title, matrix):
"""
draw heatmap for results
"""
plt.figure(figsize=(10, 8), dpi=80)
plt.imshow(matrix, cmap='bwr', extent=(0.5, len(matrix)+0.5, 0.5, len(matrix)+0.5),
origin='lower')
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.xlabel('task round', fontsize=15)
plt.ylabel('task', fontsize=15)
plt.title(title, fontsize=15)
plt.colorbar(format='%.2f')
output_dir = os.path.join(output, f"output/{title}-heatmap.png")
#print(output_dir)
plt.savefig(output_dir)
plt.show()

def get_visualization_func(mode):
""" get visualization func """
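A hedged usage sketch of the new helper; the workspace path is illustrative, and the function itself appends `output/<title>-heatmap.png` to the directory it is given, so that subdirectory must exist:

```python
# Usage sketch (paths are illustrative). draw_heatmap_picture writes
# <workspace>/output/<title>-heatmap.png and also calls plt.show().
import os

from core.storymanager.visualization import draw_heatmap_picture

workspace = "./workspace_demo"
os.makedirs(os.path.join(workspace, "output"), exist_ok=True)
draw_heatmap_picture(workspace, "accuracy", [[0.90, 0.10],
                                             [0.85, 0.92]])
```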
35 changes: 32 additions & 3 deletions core/testcasecontroller/algorithm/algorithm.py
@@ -17,17 +17,19 @@
import copy

from core.common.constant import ParadigmType
from core.common.utils import load_module
from core.testcasecontroller.algorithm.module import Module
from core.testcasecontroller.algorithm.paradigm import (
SingleTaskLearning,
IncrementalLearning,
MultiedgeInference,
LifelongLearning,
)
from core.testcasecontroller.generation_assistant import get_full_combinations


# pylint: disable=too-few-public-methods
class Algorithm:
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-few-public-methods
"""
Algorithm: typical distributed-synergy AI algorithm paradigm.
Notes:
@@ -53,14 +55,20 @@ class Algorithm:
def __init__(self, name, config):
self.name = name
self.paradigm_type: str = ""
self.third_party_packages: list = []
self.incremental_learning_data_setting: dict = {
"train_ratio": 0.8,
"splitting_method": "default"
}
self.lifelong_learning_data_setting: dict = {
"train_ratio": 0.8,
"splitting_method": "default"
}
self.initial_model_url: str = ""
self.modules: list = []
self.modules_list = None
self._parse_config(config)
self._load_third_party_packages()

def paradigm(self, workspace: str, **kwargs):
"""
@@ -93,6 +101,9 @@ def paradigm(self, workspace: str, **kwargs):
if self.paradigm_type == ParadigmType.MULTIEDGE_INFERENCE.value:
return MultiedgeInference(workspace, **config)

if self.paradigm_type == ParadigmType.LIFELONG_LEARNING.value:
return LifelongLearning(workspace, **config)

return None

def _check_fields(self):
Expand All @@ -113,6 +124,11 @@ def _check_fields(self):
f"algorithm incremental_learning_data_setting"
f"({self.incremental_learning_data_setting} must be dictionary type.")

if not isinstance(self.lifelong_learning_data_setting, dict):
raise ValueError(
f"algorithm lifelong_learning_data_setting"
f"({self.lifelong_learning_data_setting} must be dictionary type.")

if not isinstance(self.initial_model_url, str):
raise ValueError(
f"algorithm initial_model_url({self.initial_model_url}) must be string type.")
@@ -138,7 +154,7 @@ def _parse_modules_config(cls, config):
for module in modules:
hps_list = module.hyperparameters_list
if not hps_list:
modules_list.append((module.type, None))
modules_list.append((module.type, [module]))
continue

module_list = []
@@ -152,3 +168,16 @@
module_combinations_list = get_full_combinations(modules_list)

return module_combinations_list

def _load_third_party_packages(self):
if len(self.third_party_packages) == 0:
return

for package in self.third_party_packages:
name = package["name"]
url = package["url"]
try:
load_module(url)
except Exception as err:
raise RuntimeError(f"load third party packages(name={name}, url={url}) failed,"
f" error: {err}.") from err
83 changes: 34 additions & 49 deletions core/testcasecontroller/algorithm/module/module.py
@@ -23,6 +23,7 @@
from core.testcasecontroller.generation_assistant import get_full_combinations


# pylint: disable=too-few-public-methods
class Module:
"""
Algorithm Module:
@@ -52,8 +53,8 @@ def __init__(self, config):
self.type: str = ""
self.name: str = ""
self.url: str = ""
self.hyperparameters = None
self.hyperparameters_list = None
self.hyperparameters = {}
self.hyperparameters_list = []
self._parse_config(config)

def _check_fields(self):
@@ -71,75 +72,59 @@ def _check_fields(self):
if not isinstance(self.url, str):
raise ValueError(f"module url({self.url}) must be string type.")

def basemodel_func(self):
def get_module_instance(self, module_type):
"""
get basemodel module function of the module.
get function of algorithm module by using module type
Parameters
---------
module_type: string
module type, e.g.: basemodel, hard_example_mining, etc.
Returns
--------
------
function
"""
class_factory_type = ClassType.GENERAL
if module_type in [ModuleType.HARD_EXAMPLE_MINING.value]:
class_factory_type = ClassType.HEM

if not self.url:
raise ValueError(f"url({self.url}) of basemodel module must be provided.")
elif module_type in [ModuleType.TASK_DEFINITION.value,
ModuleType.TASK_RELATIONSHIP_DISCOVERY.value,
ModuleType.TASK_REMODELING.value,
ModuleType.TASK_ALLOCATION.value,
ModuleType.INFERENCE_INTEGRATE.value]:
class_factory_type = ClassType.STP

try:
utils.load_module(self.url)
# pylint: disable=E1134
basemodel = ClassFactory.get_cls(type_name=ClassType.GENERAL,
t_cls_name=self.name)(**self.hyperparameters)
except Exception as err:
raise RuntimeError(f"basemodel module loads class(name={self.name}) failed, "
f"error: {err}.") from err
elif module_type in [ModuleType.TASK_UPDATE_DECISION.value]:
class_factory_type = ClassType.KM

return basemodel
elif module_type in [ModuleType.UNSEEN_TASK_ALLOCATION.value]:
class_factory_type = ClassType.UTP

def hard_example_mining_func(self):
"""
get hard example mining function of the module.
Returns:
--------
function
"""
elif module_type in [ModuleType.UNSEEN_SAMPLE_RECOGNITION.value,
ModuleType.UNSEEN_SAMPLE_RE_RECOGNITION.value]:
class_factory_type = ClassType.UTD

if self.url:
try:
utils.load_module(self.url)
# pylint: disable=E1134
func = ClassFactory.get_cls(
type_name=ClassType.HEM, t_cls_name=self.name)(**self.hyperparameters)
type_name=class_factory_type, t_cls_name=self.name)(**self.hyperparameters)

return func
except Exception as err:
raise RuntimeError(f"hard_example_mining module loads class"
f"(name={self.name}) failed, error: {err}.") from err
raise RuntimeError(f"module(type={module_type} loads class(name={self.name}) "
f"failed, error: {err}.") from err

# call built-in hard example mining function
hard_example_mining = {"method": self.name}
# call lib built-in module function
module_func = {"method": self.name}
if self.hyperparameters:
hard_example_mining["param"] = self.hyperparameters

return hard_example_mining
module_func["param"] = self.hyperparameters

def get_module_func(self, module_type):
"""
get function of algorithm module by using module type
Parameters
---------
module_type: string
module type, e.g.: basemodel, hard_example_mining, etc.
Returns
------
function
"""
func_name = f"{module_type}_func"
return getattr(self, func_name)
return module_func

def _parse_config(self, config):
# pylint: disable=C0103
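For reference, a condensed view of the dispatch that `get_module_instance` now performs, derived from the branches above. The `ClassType` import path is assumed to be Sedna's class factory, as used elsewhere in this module; the dict itself is illustrative and not part of the change.

```python
# Condensed module-type -> ClassFactory registry mapping (illustrative).
from sedna.common.class_factory import ClassType  # assumed import path

MODULE_TYPE_TO_FACTORY = {
    "basemodel": ClassType.GENERAL,
    "hard_example_mining": ClassType.HEM,
    "task_definition": ClassType.STP,
    "task_relationship_discovery": ClassType.STP,
    "task_remodeling": ClassType.STP,
    "task_allocation": ClassType.STP,
    "inference_integrate": ClassType.STP,
    "task_update_decision": ClassType.KM,
    "unseen_task_allocation": ClassType.UTP,
    "unseen_sample_recognition": ClassType.UTD,
    "unseen_sample_re_recognition": ClassType.UTD,
}
```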
1 change: 1 addition & 0 deletions core/testcasecontroller/algorithm/paradigm/__init__.py
@@ -16,3 +16,4 @@
from .incremental_learning import IncrementalLearning
from .singletask_learning import SingleTaskLearning
from .multiedge_inference import MultiedgeInference
from .lifelong_learning import LifelongLearning