[2024.01.15 CoCheLab] commit-1
 Updates for [#3](#3)
 1. Completed the model code for the conventional cache algorithms LRU, LFU, and FIFO.
 2. CoCheLab will support training/testing of the above algorithms in future updates.
DarriusL committed Jan 15, 2024
1 parent ed00276 commit 74dad40
Showing 7 changed files with 300 additions and 0 deletions.
63 changes: 63 additions & 0 deletions config/fifo/fifo_ml1m.json
@@ -0,0 +1,63 @@
{
    "net": {
        "type": "FIFO",
        "bs_storagy": 1000
    },
    "seed": 6655,
    "linux_fast_num_workers": 4,
    "email_reminder": false,
    "dataset": {
        "type": "ml1m",
        "path": "./data/datasets/process/complete/ml_devide_55.data",
        "crop_or_fill": false,
        "fill_mask": 0,
        "limit_length": 55
    },
    "train": {
        "batch_size": 256,
        "max_epoch": 1000,
        "valid_step": 10,
        "stop_train_step_valid_not_improve": 50,
        "gpu_is_available": false,
        "use_amp": false,
        "optimizer_type": "adam",
        "learning_rate": 0.001,
        "weight_decay": 1e-08,
        "betas": [
            0.9,
            0.999
        ],
        "use_lr_schedule": false,
        "lr_max": 1e-05,
        "metric_less": true,
        "save": true,
        "model_save_path": "./data/saved/caser/ml1m/64_8/model.model",
        "end_save": false
    },
    "test": {
        "batch_size": 256,
        "cache_satisfaction_ratio": 0.2,
        "bs_storagy": 1000,
        "slide_T": 3,
        "alter_topk": 10,
        "metrics_at_k": [
            5,
            10,
            20
        ],
        "cache_size": [
            0.1,
            0.2,
            0.3,
            0.4,
            0.5,
            0.6,
            0.7,
            0.8,
            0.9,
            1.0
        ],
        "gpu_is_available": false,
        "save": true
    }
}
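
For orientation, here is a minimal sketch of how the "net" block of a config like the one above could be parsed and dispatched to the cache classes added in this commit. The build_cache helper and the direct use of json.load are illustrative assumptions, not CoCheLab's actual config loader.

import json

def build_cache(cfg_path):
    # Hypothetical helper: read a config file and pick the cache class from net.type.
    # The imports assume the module layout introduced in this commit.
    from model.framework.fifo import FIFO
    from model.framework.lfu import LFU
    from model.framework.lru import LRU
    with open(cfg_path) as f:
        cfg = json.load(f)
    net_cfg = cfg["net"]  # e.g. {"type": "FIFO", "bs_storagy": 1000}
    classes = {"fifo": FIFO, "lfu": LFU, "lru": LRU}
    return classes[net_cfg["type"].lower()](net_cfg, net_cfg["type"])

# cache = build_cache("./config/fifo/fifo_ml1m.json")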
64 changes: 64 additions & 0 deletions config/lfu/lfu_ml1m.json
@@ -0,0 +1,64 @@
{
    "net": {
        "type": "LFU",
        "bs_storagy": 1000,
        "recent_used_n": 100
    },
    "seed": 6655,
    "linux_fast_num_workers": 4,
    "email_reminder": false,
    "dataset": {
        "type": "ml1m",
        "path": "./data/datasets/process/complete/ml_devide_55.data",
        "crop_or_fill": false,
        "fill_mask": 0,
        "limit_length": 55
    },
    "train": {
        "batch_size": 256,
        "max_epoch": 1000,
        "valid_step": 10,
        "stop_train_step_valid_not_improve": 50,
        "gpu_is_available": false,
        "use_amp": false,
        "optimizer_type": "adam",
        "learning_rate": 0.001,
        "weight_decay": 1e-08,
        "betas": [
            0.9,
            0.999
        ],
        "use_lr_schedule": false,
        "lr_max": 1e-05,
        "metric_less": true,
        "save": true,
        "model_save_path": "./data/saved/caser/ml1m/64_8/model.model",
        "end_save": false
    },
    "test": {
        "batch_size": 256,
        "cache_satisfaction_ratio": 0.2,
        "bs_storagy": 1000,
        "slide_T": 3,
        "alter_topk": 10,
        "metrics_at_k": [
            5,
            10,
            20
        ],
        "cache_size": [
            0.1,
            0.2,
            0.3,
            0.4,
            0.5,
            0.6,
            0.7,
            0.8,
            0.9,
            1.0
        ],
        "gpu_is_available": false,
        "save": true
    }
}
64 changes: 64 additions & 0 deletions config/lru/lru_ml1m.json
@@ -0,0 +1,64 @@
{
    "net": {
        "type": "LRU",
        "bs_storagy": 1000,
        "recent_used_n": 100
    },
    "seed": 6655,
    "linux_fast_num_workers": 4,
    "email_reminder": false,
    "dataset": {
        "type": "ml1m",
        "path": "./data/datasets/process/complete/ml_devide_55.data",
        "crop_or_fill": false,
        "fill_mask": 0,
        "limit_length": 55
    },
    "train": {
        "batch_size": 256,
        "max_epoch": 1000,
        "valid_step": 10,
        "stop_train_step_valid_not_improve": 50,
        "gpu_is_available": false,
        "use_amp": false,
        "optimizer_type": "adam",
        "learning_rate": 0.001,
        "weight_decay": 1e-08,
        "betas": [
            0.9,
            0.999
        ],
        "use_lr_schedule": false,
        "lr_max": 1e-05,
        "metric_less": true,
        "save": true,
        "model_save_path": "./data/saved/caser/ml1m/64_8/model.model",
        "end_save": false
    },
    "test": {
        "batch_size": 256,
        "cache_satisfaction_ratio": 0.2,
        "bs_storagy": 1000,
        "slide_T": 3,
        "alter_topk": 10,
        "metrics_at_k": [
            5,
            10,
            20
        ],
        "cache_size": [
            0.1,
            0.2,
            0.3,
            0.4,
            0.5,
            0.6,
            0.7,
            0.8,
            0.9,
            1.0
        ],
        "gpu_is_available": false,
        "save": true
    }
}
36 changes: 36 additions & 0 deletions model/framework/base.py
@@ -0,0 +1,36 @@
# @Time : 2024.01.15
# @Author : Darrius Lei
# @Email : [email protected]

from lib import util, glb_var
from collections import deque
from collections.abc import Iterable

logger = glb_var.get_value('log');

class Cache():
    '''Abstract Cache class to define the API methods
    '''
    def __init__(self, cache_cfg, type) -> None:
        if type.lower() not in ['fifo', 'lru', 'lfu']:
            logger.error('This function only supports FIFO/LRU/LFU.')
            raise RuntimeError;
        util.set_attr(self, cache_cfg);
        # deque's size bound is set with the maxlen keyword
        self.cache = deque(maxlen = self.bs_storagy);

    def extend(self, cache:deque, __iterable:Iterable, is_unique:bool) -> None:
        '''Extend the given deque; if is_unique, remove items that are already present so they move to the tail'''
        if is_unique:
            for item in __iterable:
                if item in cache:
                    cache.remove(item);
        cache.extend(__iterable);

    def check_unique(self, __iterable:Iterable) -> int:
        '''Count how many items of the iterable are already in the cache'''
        n = int(0);
        for item in __iterable:
            if item in self.cache:
                n += 1;
        return n;

    def update(self) -> None:
        logger.error('Method needs to be implemented before being called');
        raise NotImplementedError;
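
As a standalone illustration (independent of the lib helpers), this is the bounded-deque behaviour Cache.extend relies on: with the unique flag, a re-seen item is removed and re-appended so it moves to the tail, and the maxlen bound silently evicts the oldest entries. The values are made up for the example.

from collections import deque

def extend_unique(cache, items):
    # same pattern as Cache.extend with is_unique=True
    for item in items:
        if item in cache:
            cache.remove(item)
    cache.extend(items)

cache = deque([1, 2, 3, 4], maxlen=4)
extend_unique(cache, [2, 5])
print(cache)  # deque([3, 4, 2, 5], maxlen=4) -- item 1 fell off the left end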
16 changes: 16 additions & 0 deletions model/framework/fifo.py
@@ -0,0 +1,16 @@
# @Time : 2024.01.15
# @Author : Darrius Lei
# @Email : [email protected]

from model.framework.base import Cache
from collections import deque

class FIFO(Cache):
    '''First in first out algorithm'''
    def __init__(self, cache_cfg, type) -> None:
        super().__init__(cache_cfg, type);

    def update(self, seqs) -> None:
        '''Append the requested sequence to the cache in arrival order; the deque bound evicts the oldest entries'''
        self.extend(self.cache, seqs.tolist(), is_unique = True);
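
A hedged usage sketch, not taken from the repo: it assumes CoCheLab's startup code has already registered the 'log' object through glb_var (base.py fetches it at import time) and that util.set_attr copies the config keys onto the instance, which is how the class later reads self.bs_storagy. The config dict mirrors the net block of fifo_ml1m.json.

import numpy as np
from model.framework.fifo import FIFO

fifo = FIFO({'type': 'FIFO', 'bs_storagy': 1000}, 'fifo')
# update() expects something with .tolist(), e.g. a numpy array of requested item ids
fifo.update(np.array([10, 20, 30]))
print(fifo.check_unique([20, 99]))  # 1 -- only item 20 is already cached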
38 changes: 38 additions & 0 deletions model/framework/lfu.py
@@ -0,0 +1,38 @@
# @Time : 2024.01.15
# @Author : Darrius Lei
# @Email : [email protected]

from model.framework.fifo import FIFO
from collections import deque, Counter
import numpy as np

class LFU(FIFO):
    '''Least Frequently Used Algorithm'''
    def __init__(self, cache_cfg, type) -> None:
        super().__init__(cache_cfg, type);
        self.cache_unique = True;
        # sliding window of recent requests used to estimate item frequencies
        self.recent_used = deque(maxlen = self.recent_used_n);
        self.recent_unique = False;
        self.cache_left = self.bs_storagy;

    def pop_item(self, n:int) -> None:
        '''Evict the n items with the lowest request frequency in the recent window'''
        if n == 0:
            return;
        c = Counter(self.recent_used);
        sort_indx = np.argsort(list(c.values()));
        for i in range(n):
            self.cache.remove(list(c.keys())[sort_indx[i]]);

    def update(self, seqs) -> None:
        '''Insert the requested sequence, evicting items via pop_item once the cache budget is used up'''
        seqs = seqs.tolist();
        self.extend(self.recent_used, seqs, self.recent_unique);
        n_unique = self.check_unique(seqs);
        if self.cache_left >= n_unique:
            self.cache_left -= n_unique;
        elif self.cache_left == 0:
            self.pop_item(n_unique);
        else:
            self.pop_item(n_unique - self.cache_left);
            self.cache_left = 0;
        self.extend(self.cache, seqs, self.cache_unique);
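
As a self-contained illustration of the eviction rule pop_item applies (not CoCheLab's code path), the snippet below picks the n least frequent items of a recent-request window with Counter and np.argsort, the same ordering the method uses before calling cache.remove. The request values are invented.

from collections import Counter
import numpy as np

recent_used = [3, 1, 3, 2, 3, 2, 5]
c = Counter(recent_used)              # request frequencies over the recent window
order = np.argsort(list(c.values()))  # key indices, least frequent first
keys = list(c.keys())
print([keys[i] for i in order[:2]])   # [1, 5] -- the two least requested items (tie order may vary)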
19 changes: 19 additions & 0 deletions model/framework/lru.py
@@ -0,0 +1,19 @@
# @Time : 2024.01.15
# @Author : Darrius Lei
# @Email : [email protected]

from model.framework.lfu import LFU
from collections.abc import Iterable

class LRU(LFU):
    '''Least Recently Used Algorithm'''
    def __init__(self, cache_cfg, type) -> None:
        super().__init__(cache_cfg, type);
        # keep recent_used duplicate-free so it records pure recency order
        self.recent_unique = True;

    def pop_item(self, n: int) -> None:
        '''Evict the n least recently used items, i.e. the head of the recent_used deque'''
        if n == 0:
            return;
        for i in range(n):
            self.cache.remove(self.recent_used[i]);
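
A standalone sketch of the recency bookkeeping LRU inherits: because recent_unique is True, re-requesting an item moves it to the tail of recent_used, so the head of the deque is the least recently used item and pop_item evicts starting from there. The values are made up.

from collections import deque

recent_used = deque(maxlen=100)

def touch(items):
    # mirrors Cache.extend with is_unique=True
    for item in items:
        if item in recent_used:
            recent_used.remove(item)
    recent_used.extend(items)

touch([1, 2, 3])
touch([2])             # 2 becomes the most recent item
print(recent_used[0])  # 1 -- least recently used, first eviction candidate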
