Commit b436c1d0 authored by Chamalee Wickrama Arachch

new scripts

parent 992e54bd
"""
Effect of lambda: Fixed
Dataset-1
"""
import numpy as np
import pandas as pd
import utils
import sbm_core
import math
from itertools import combinations
import itertools
from sklearn.metrics.cluster import adjusted_rand_score

# Initialize
np.random.seed(34573251)
num_roles = 2
num_vertices = 20
num_segments = 2
num_levels = 2
H = num_levels
NO_SAMPLES = 1850

group_assignment = np.random.randint(num_roles, size=(num_vertices))
nodes = np.arange(num_vertices)
list_of_groups = [[] for _ in range(num_roles)]

np.random.seed(107)

for idx, val in enumerate(group_assignment):
    list_of_groups[val].append(nodes[idx])
# print(list_of_groups)

size_all_pairs = {}
for k in range(0, num_roles):
    for g in range(k, num_roles):
        U = list_of_groups[k]
        W = list_of_groups[g]
        if k == g:
            size_all_pairs[k, g] = math.comb(len(U), 2)
        if k != g:
            size_all_pairs[k, g] = len(U) * len(W)

# h-level lambda estimates
lambda_estimates_h = np.random.rand(num_roles, num_roles, H)

# _itr = 8  # Change _itr from 0 to 8 for large lambda differences
# yu = (9 - _itr) * .1
# lambda_estimates_h[0, 0, :] = [yu, 0.01]
# lambda_estimates_h[0, 1, :] = [0.01, yu]
# lambda_estimates_h[1, 0, :] = lambda_estimates_h[0, 1, :]
# lambda_estimates_h[1, 1, :] = [yu, yu]

# set value for each delta (0.01 - 1)
_itr = 5  # Change _itr from 0 to 8 for smaller lambda differences
yu = (9 - _itr) * .01
lambda_estimates_h[0, 0, :] = [yu, 0.01]
lambda_estimates_h[0, 1, :] = [0.01, yu]
lambda_estimates_h[1, 0, :] = lambda_estimates_h[0, 1, :]
lambda_estimates_h[1, 1, :] = [yu, yu]

l1 = list(range(0, H))
l2 = []
if num_segments > num_levels:
    l2 = [np.random.randint(0, H) for i in range(num_segments - H)]

# Mapping from segment to a level
g_mapping = np.array(l1 + l2)
# print('g mapping {}'.format(g_mapping))

# Initialize lamda
lamda_arr = np.zeros((num_roles, num_roles, num_segments), dtype=float)
for d in range(0, num_segments):
    lamda_arr[:, :, d] = lambda_estimates_h[:, :, g_mapping[d]]
lamda_arr_act = np.zeros((num_roles, num_roles, num_segments), dtype=float)

change_points_arr = np.zeros((num_roles, num_roles, num_segments + 1), dtype=int)

results = np.zeros((20, 3), dtype=float)
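
# Each segment d is assigned one of the H rate levels via g_mapping, so
# lamda_arr[:, :, d] holds the group-pair Poisson rates that generate events
# inside segment d; change_points_arr will hold the segment boundary points.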
# 20 iterations
for itr_no in range(0, 20):

    df_all = None
    points = list(range(0, (num_segments + 1) * NO_SAMPLES, NO_SAMPLES))
    list1 = []
    # Generate piecewise non-homogeneous poisson process
    for k in range(0, num_roles):
        for g in range(k, num_roles):
            comb = []
            if k == g:
                comb = list(combinations(list_of_groups[k], 2))
                # print(type(comb))
            else:
                key_data = [list_of_groups[k], list_of_groups[g], ]
                comb = list(itertools.product(*key_data))
                # print(comb)
            if len(comb) != size_all_pairs[k, g]:
                print('not equal..')

            change_points_arr[k, g, :] = points
            lamda_arr[k, g, :] = lamda_arr[g, k, :]

            tot_count = np.zeros((num_segments), dtype=float)
            for pair in comb:
                for d in range(0, num_segments):
                    s = np.random.poisson(lamda_arr[k, g, d], NO_SAMPLES)
                    # print(np.count_nonzero(s))
                    tot_count[d] += np.count_nonzero(s)

                    list1 = [i for i, e in enumerate(s) if e != 0]
                    if len(list1) == 0:
                        print('zero')
                    list1 = [x + points[d] for x in list1]

                    df = pd.DataFrame(data=list1)
                    df.columns = ['timestamp']
                    N = df.size
                    list_start_stations = [pair[0]] * N
                    list_end_stations = [pair[1]] * N
                    df['source'] = list_start_stations
                    df['target'] = list_end_stations
                    df_all = pd.concat([df_all, df], ignore_index=True)

            for d in range(0, num_segments):
                lamda_arr_act[k, g, d] = tot_count[d] / (NO_SAMPLES * len(comb))
                # print(tot_count[d])
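
    # lamda_arr_act now holds the empirical rate per group pair and segment
    # (fraction of non-zero samples, averaged over node pairs); it is used
    # below as the ground-truth rate matrix for the likelihood computation.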
    ## Other preparations

    # Remove self loops
    df_all = df_all[((df_all['source']) != (df_all['target']))]

    # sort by timestamp
    df_all = df_all.sort_values('timestamp')
    df_all = df_all[['target', 'timestamp', 'source']]

    # Save as .csv file
    # df_all.to_csv('./Data/synthetic_ground_truth_g1.csv')

    df = df_all
    dest_folder = './Results/synthetic/3'
    t_df = df['timestamp']

    # list of nodes
    nodes_arr = np.union1d(df['target'], df['source']).astype(int)
    nodes = nodes_arr.tolist()
    num_vertices = len(nodes)

    # node-group dictionary
    group_dic = {}
    keys = nodes
    values = list(group_assignment)
    group_dic = dict(zip(keys, values))

    # create a new dictionary - key: node-pair, value: list of timestamps
    dic = df.groupby(['source', 'target'])['timestamp'].apply(list).to_dict()
    # print('{} {} {} '.format(group_dic, lamda_arr_act, change_points_arr))

    likelihood_sum = sbm_core.compute_cost(group_dic, lamda_arr_act, change_points_arr, num_roles, num_segments, dic)
    # print(' Initial Actual likelihood .......%f' % likelihood_sum)

    def _swap(row):
        if row['source'] > row['target']:
            row['source'], row['target'] = row['target'], row['source']
        return row

    # Undirected graph
    df = df.apply(lambda row: _swap(row), axis=1)

    # scale timestamps for zeroth reference point
    refValue = df['timestamp'].min()
    df['timestamp'] -= refValue
    # Experiment
    import experiment

    # User parameters
    num_roles = 2
    num_segments = 2
    num_levels = 2  # Optional arg
    algo_ver = 3
    dest_folder = './Results/synthetic/'

    # tuning parameters
    theta = 1e-5
    eta = 1
    tuning_params = {'theta': theta, 'eta': eta}

    exp_obj = experiment.Experiment(df, num_roles, num_segments, algo_ver, dest_folder, tuning_params, num_levels, refValue)
    [itr_d, likelihood_d, group_dic_d, lambda_estimates_d, change_points_arr_d] = exp_obj.execute()
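
    # execute() returns the iteration count, the final likelihood, the
    # discovered node-to-group dictionary, the estimated rates, and the
    # discovered change points.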
    # SEGMENTATION ACCURACY
    t_df = sorted(t_df)

    # discovered segmentation
    chg_points = change_points_arr_d[0, 0, :]
    ranges_arr = [[chg_points[s] + 1, chg_points[s + 1]] for s in range(0, len(chg_points) - 1)]
    ranges_arr[0][0] = 0
    list_time_stamps = list(t_df)

    # iterate over timestamps list
    dis_arr = list()
    gt_arr = list()
    for item in list_time_stamps:
        # find the segment which the timestamp belongs to
        # (is dependent on which groups the two nodes belong to)
        d = sbm_core._findSegment(ranges_arr, len(ranges_arr), int(item))
        dis_arr.append(d)

    # ground-truth segmentation
    chg_points = change_points_arr[0, 0, :]
    ranges_arr = [[chg_points[s] + 1, chg_points[s + 1]] for s in range(0, len(chg_points) - 1)]
    ranges_arr[0][0] = 0
    list_time_stamps = list(t_df)

    # iterate over timestamps list
    for item in list_time_stamps:
        d = sbm_core._findSegment(ranges_arr, len(ranges_arr), int(item))
        gt_arr.append(d)

    ind = adjusted_rand_score(gt_arr, dis_arr)
    # print('rand index: seg {} : {}'.format(_itr, ind))
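
    # ind: adjusted Rand index between the ground-truth and discovered
    # segment labels of the timestamps.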
    g1 = group_dic_d
    g2 = group_dic_d[1]
    ds = list(group_dic_d.values())
    gt1 = list(g1.values())
    ind_grp = adjusted_rand_score(ds, gt1)
    # print('rand index: group {} : {}'.format(_itr, ind_grp))

    results[itr_no][0] = ind
    results[itr_no][1] = itr_d
    results[itr_no][2] = ind_grp
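
    # results columns: [segmentation ARI, iteration count returned by execute(), group ARI]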
import pickle
pickle.dump(results, open('large-fixed-file-{}.pickle'.format(_itr), 'wb'))
arr = results
ll_avg_val = (sum(arr)/len(arr))
#FINAL RESULTS
print(ll_avg_val)
print(max(arr[:,0]))
print(min(arr[:,0]))
print(max(arr[:,1]))
print(min(arr[:,1]))
print(yu)

"""
Effect of lambda: LD
Dataset-2
"""
import numpy as np
import pandas as pd
import utils
import sbm_core
import math
from itertools import combinations
import itertools
from sklearn.metrics.cluster import adjusted_rand_score

# Initialize
np.random.seed(34573251)
results = np.zeros((20, 3), dtype=float)

# %%
num_roles = 2
num_vertices = 20
num_segments = 4
num_levels = 2
NO_SAMPLES = 1850

nodes = np.arange(num_vertices)
lamda_arr_act = np.zeros((num_roles, num_roles, num_levels), dtype=float)
H = num_levels
# print('k-h levels %d' % (num_levels))

# h-level lambda estimates
lambda_estimates_h = np.random.rand(num_roles, num_roles, H)

# set value for each delta (0.01 - 1)
yu = 8 * .1
lambda_estimates_h[0, 0, :] = [yu, 0.01]
lambda_estimates_h[0, 1, :] = [0.01, yu]
lambda_estimates_h[1, 0, :] = lambda_estimates_h[0, 1, :]
lambda_estimates_h[1, 1, :] = [yu, yu]

l1 = list(range(0, H))
l2 = []
if num_segments > num_levels:
    l2 = [np.random.randint(0, H) for i in range(num_segments - H)]

# Mapping from segment to a level
g_mapping = np.array(l1 + l2)
# print('g mapping {}'.format(g_mapping))

# initialize group assignment randomly
group_assignment_arr = np.random.randint(num_roles, size=(num_levels, num_vertices))

# node-group dictionary (one per level)
group_dic = {}
for i in range(0, num_levels):
    level = i
    group_dic_level = {}
    keys = nodes
    values = list(group_assignment_arr[level])
    group_dic_level = dict(zip(keys, values))
    group_dic[i] = group_dic_level

for e_h in range(0, num_segments):
    g_a = group_dic[g_mapping[e_h]]
    list_of_groups = [[] for _ in range(num_roles)]
    for idx, val in g_a.items():
        list_of_groups[val].append(idx)
    # print('group assignments {}: {}'.format(e_h, list_of_groups))

# Initialize lamda
lamda_arr = np.zeros((num_roles, num_roles, num_segments), dtype=float)
for d in range(0, num_segments):
    for k in range(0, num_roles):
        for g in range(k, num_roles):
            lamda_arr[k, g, d] = lambda_estimates_h[k, g, g_mapping[d]]
            lamda_arr[g, k, d] = lamda_arr[k, g, d]

change_points_arr = np.zeros((num_roles, num_roles, num_segments + 1), dtype=int)
df_all = None
points = list(range(0, (num_segments + 1) * NO_SAMPLES, NO_SAMPLES))
list1 = []

level_seg_mapping = {}
for d in range(num_segments):
    level = g_mapping[d]
    if level in level_seg_mapping:
        level_seg_mapping[level].append(d)
    else:
        level_seg_mapping[level] = []
        level_seg_mapping[level].append(d)
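
# level_seg_mapping[level] lists the segments that share that level's rates and
# group assignment. Note that df_all is initialised once here, so as written the
# sampled events accumulate across the 20 iterations below.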
# %% 20 iterations
for itr_no in range(0, 20):
    # Generate piecewise non-homogeneous poisson process
    tot_count = np.zeros((num_levels), dtype=float)
    com_len = np.zeros((num_levels), dtype=float)

    for i in range(0, num_levels):
        group_assignment = group_assignment_arr[i]
        # print(group_assignment)
        list_of_groups = [[] for _ in range(num_roles)]
        for idx, val in enumerate(group_assignment):
            list_of_groups[val].append(nodes[idx])
        # print(list_of_groups)

        size_all_pairs = {}
        for kk in range(0, num_roles):
            for gg in range(kk, num_roles):
                U = list_of_groups[kk]
                W = list_of_groups[gg]
                if kk == gg:
                    size_all_pairs[kk, gg] = math.comb(len(U), 2)
                if kk != gg:
                    size_all_pairs[kk, gg] = len(U) * len(W)

        for k in range(0, num_roles):
            for g in range(k, num_roles):
                change_points_arr[k, g, :] = points
                lamda_arr[k, g, :] = lamda_arr[g, k, :]

                comb = []
                if k == g:
                    comb = list(combinations(list_of_groups[k], 2))
                    # print(type(comb))
                else:
                    key_data = [list_of_groups[k], list_of_groups[g], ]
                    comb = list(itertools.product(*key_data))
                    # print(comb)
                if len(comb) != size_all_pairs[k, g]:
                    print('not equal..')

                com_len[i] = len(comb)
                tot_count[i] = 0
                for pair in comb:
                    # sample event counts for this pair at the level-i rate
                    s = np.random.poisson(lambda_estimates_h[k, g, i], NO_SAMPLES)
                    # print(np.count_nonzero(s))
                    tot_count[i] += np.count_nonzero(s)

                    list_org = [t for t, e in enumerate(s) if e != 0]
                    if len(list_org) == 0:
                        print('zero')

                    # copy the sampled events into every segment mapped to level i
                    for d in level_seg_mapping[i]:
                        list1 = [x + points[d] for x in list_org]
                        df = pd.DataFrame(data=list1)
                        df.columns = ['timestamp']
                        N = df.size
                        list_start_stations = [pair[0]] * N
                        list_end_stations = [pair[1]] * N
                        df['source'] = list_start_stations
                        df['target'] = list_end_stations
                        df_all = pd.concat([df_all, df], ignore_index=True)

                lamda_arr_act[k, g, i] = round(((tot_count[i]) / (NO_SAMPLES * com_len[i])), 3)
                lamda_arr_act[g, k, i] = lamda_arr_act[k, g, i]
                # print(' {} {} {} {} : k g d :lamb'.format(k, g, i, lamda_arr_act[g, k, i]))
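
    # lamda_arr_act[:, :, i] is the empirical rate observed for level i
    # (fraction of non-zero samples per pair), rounded to three decimals.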
    # Remove self loops
    df_all = df_all[((df_all['source']) != (df_all['target']))]

    # sort by timestamp
    df_all = df_all.sort_values('timestamp')
    df_all = df_all[['target', 'timestamp', 'source']]

    # Save as .csv file
    # df_all.to_csv('./Data/synthetic_ground_truth_g1.csv')

    df = df_all
    dest_folder = './Results/synthetic/3'
    t_df = df['timestamp']

    # list of nodes
    nodes_arr = np.union1d(df['target'], df['source']).astype(int)
    nodes = nodes_arr.tolist()
    num_vertices = len(nodes)

    # create a new dictionary - key: node-pair, value: list of timestamps
    dic = df.groupby(['source', 'target'])['timestamp'].apply(list).to_dict()
    # print('{} {} {} '.format(group_dic, lamda_arr_act, change_points_arr))

    def _swap(row):
        if row['source'] > row['target']:
            row['source'], row['target'] = row['target'], row['source']
        return row

    # Undirected graph
    df = df.apply(lambda row: _swap(row), axis=1)

    # scale timestamps for zeroth reference point
    refValue = df['timestamp'].min()
    df['timestamp'] -= refValue
    # ground-truth segmentation
    chg_points = change_points_arr[0, 0, :]
    ranges_arr = [[chg_points[s] + 1, chg_points[s + 1]] for s in range(0, len(chg_points) - 1)]
    ranges_arr[0][0] = 0
    list_time_stamps = list(t_df)

    # iterate over timestamps list
    gt_arr = list()
    for item in list_time_stamps:
        # find the segment which the timestamp belongs to
        # (is dependent on which groups the two nodes belong to)
        d = sbm_core._findSegment(ranges_arr, len(ranges_arr), int(item))
        gt_arr.append(d)
    # Experiment
    import experiment

    # User parameters
    algo_ver = 4
    dest_folder = './Results/synthetic/'

    # tuning parameters
    theta = 1e-7
    eta = 1
    tuning_params = {'theta': theta, 'eta': eta}

    exp_obj = experiment.Experiment(df, num_roles, num_segments, algo_ver, dest_folder, tuning_params, num_levels, refValue)
    # [likelihood_f, group_dic_f] = exp_obj.execute()
    [it, ll1, group_dic_d, lambda_estimates, change_points_arr_d] = exp_obj.execute()
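
    # execute() returns the iteration count, the final likelihood, the
    # discovered per-level group dictionaries, the estimated rates, and the
    # discovered change points.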
    # SEGMENTATION ACCURACY
    t_df = sorted(t_df)

    # discovered segmentation
    chg_points = change_points_arr_d[0, 0, :]
    ranges_arr = [[chg_points[s] + 1, chg_points[s + 1]] for s in range(0, len(chg_points) - 1)]
    ranges_arr[0][0] = 0
    list_time_stamps = list(t_df)

    # iterate over timestamps list
    dis_arr = list()
    for item in list_time_stamps:
        # find the segment which the timestamp belongs to
        d = sbm_core._findSegment(ranges_arr, len(ranges_arr), int(item))
        dis_arr.append(d)

    gt_arr = np.array(gt_arr, dtype=np.float64)
    dis_arr = np.array(dis_arr, dtype=np.float64)
    ind_seg = adjusted_rand_score(gt_arr, dis_arr)
    # print('ind {} : {}'.format(_itr, ind_seg))

    # print('g mapping {}'.format(g_mapping))
    for e_h in range(0, num_segments):
        g_a = group_dic[g_mapping[e_h]]
        list_of_groups = [[] for _ in range(num_roles)]
        for idx, val in g_a.items():
            list_of_groups[val].append(idx)
        # print('group assignments {}: {}'.format(e_h, list_of_groups))

    g1 = group_dic_d[0]
    g2 = group_dic_d[1]
    # print('rand index: group {} : {}'.format(_itr, ind_grp))

    found_cont = 0
    for i_h in range(0, num_levels):
        # i_h level
        grp = group_dic_d[i_h]
        list_of_groups_d = [[] for _ in range(num_roles)]
        for idx, val in grp.items():
            list_of_groups_d[val].append(idx)

        ds = list(group_dic_d[i_h].values())
        gt1 = list(g1.values())
        gt2 = list(g2.values())

        ind1 = adjusted_rand_score(ds, gt1)
        ind2 = adjusted_rand_score(ds, gt2)
        d_in = max(ind1, ind2)
        found_cont += d_in

    ind = found_cont / 2
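
    # Group accuracy: each discovered level is compared against both discovered
    # level assignments and the better ARI is kept; 'ind' averages this over the
    # two levels.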
    results[itr_no][0] = ind_seg
    results[itr_no][1] = it
    results[itr_no][2] = ind
    # print(ind_seg)
    # print(lamda_arr)
    # print(lamda_arr_act)

print('end')

import pickle
# pickle.dump(results, open('max-small-file-{}.pickle'.format(_itr), 'wb'))

# FINAL RESULTS
arr = results
ll_avg_val = (sum(arr) / len(arr))
print(ll_avg_val)
print(max(arr[:,0]))
print(min(arr[:,0]))
print(max(arr[:,1]))
print(min(arr[:,1]))
print(yu)