"""
Effect of lambda: Fixed
Dataset-1
"""
import numpy as np
import pandas as pd
import utils
import sbm_core
import math
from itertools import combinations
import itertools
from sklearn.metrics.cluster import adjusted_rand_score

#  Initialize
np.random.seed(34573251)

results = np.zeros((50,3) , dtype=float)

for itr_no in range(0,50):

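    # Synthetic-data parameters: 2 roles (groups), 20 vertices, 4 time segments,
    # NO_SAMPLES time steps per segment, and a random group assignment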
    num_roles=2
    num_vertices=20
    num_segments = 4

    NO_SAMPLES= 1850
    group_assignment= np.random.randint(num_roles, size=(num_vertices))

    nodes = np.arange(num_vertices)

    list_of_groups=  [[] for _ in range(num_roles)]

    for idx, val in enumerate(group_assignment):
        list_of_groups[val].append(nodes[idx])

    # print(list_of_groups)

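    # Number of node pairs for each pair of groups:
    # within a group it is C(|U|, 2), across groups it is |U|*|W|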
    size_all_pairs = {}
    for k in range(0, num_roles):
        for g in range(k, num_roles):
            U=list_of_groups[k]
            W=list_of_groups[g]

            if k == g:
                size_all_pairs[k,g] = math.comb(len(U), 2)
            if k != g:
                size_all_pairs[k,g] = len(U)*len(W)

    # Per-segment rate matrices: lamda_arr is overwritten below from the
    # level estimates; lamda_arr_act will hold the empirically realised rates
    lamda_arr = np.ones((num_roles, num_roles, num_segments), dtype=float)
    lamda_arr_act = np.zeros((num_roles, num_roles, num_segments), dtype=float)

    num_levels = 2
    H =num_levels

    # h-level lambda estimates
    lambda_estimates_h = np.random.rand(num_roles, num_roles, H)

    # _itr = 8   # Change _itr from 0 to 8 for large lambda differences
    # _itr = 1
    # yu = (9-_itr)*.1
    # lambda_estimates_h[0,0,:] = [yu, 0.01]
    # lambda_estimates_h[0,1,:] = [0.01, yu]
    # lambda_estimates_h[1,0,:] = lambda_estimates_h[0,1,:]
    # lambda_estimates_h[1,1,:] = [yu, yu]

    _itr = 5  # Change _itr from 0 to 8 for smaller lambda differences
    yu = (9-_itr)*.01
    lambda_estimates_h[0,0,:] = [yu, 0.01]
    lambda_estimates_h[0,1,:] = [0.01, yu]
    lambda_estimates_h[1,0,:] = lambda_estimates_h[0,1,:]
    lambda_estimates_h[1,1,:] = [yu, yu]


    l1 =list(range(0, H))
    l2 = []
    if num_segments > num_levels:
        l2 = [np.random.randint(0,H) for i in range(num_segments-H)]

    # Mapping from segment to a level
    g_mapping= np.array(l1 + l2)
    # print('g mapping {}'.format(g_mapping))
    # Initialize lamda_arr for each segment from its level estimate
    lamda_arr = np.zeros((num_roles, num_roles,num_segments) , dtype=float)
    for d in range(0, num_segments):
        lamda_arr[:,:, d]= lambda_estimates_h[:,:,g_mapping[d]]

    change_points_arr = np.zeros((num_roles, num_roles, num_segments+1) , dtype=int)
    df_all= None

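    # Segment boundaries: one change point every NO_SAMPLES time steps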
    points= list(range(0, (num_segments+1)*NO_SAMPLES, NO_SAMPLES))
    list1 = []

    #  Generate piecewise non-homogeneous poisson process
    for k in range(0, num_roles):
            for g in range(k, num_roles):
                comb = []
                if k == g:
                    comb = list(combinations(list_of_groups[k], 2))
                    # print(type(comb))
                else:
                    # comb = []
                    key_data = [list_of_groups[k],list_of_groups[g],]
                    comb = list(itertools.product(*key_data))
                    # print(comb)
                if len(comb) != size_all_pairs[k,g]:
                    print('not equal..')

                change_points_arr[k,g,:] = points
                lamda_arr[g,k,:] = lamda_arr[k,g,:]  # mirror to keep the rate matrix symmetric

                tot_count = np.zeros((num_segments) , dtype=float)

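                # Draw events for every node pair of this group pair, segment by segment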
                for pair in comb:

                    for d in range(0,num_segments):

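                        # One Poisson draw per time step; each non-zero draw is later
                        # recorded as a single event at that time step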
                        s = np.random.poisson(lamda_arr[k,g,d], NO_SAMPLES)
                        # print(np.count_nonzero(s))
                        tot_count[d] += np.count_nonzero(s)

                        list1=[i for i, e in enumerate(s) if e != 0]

                        if len(list1) == 0:
                            print('zero')

                        list1 = [x+points[d] for x in list1]

                        df = pd.DataFrame(data=list1)
                        df.columns =['timestamp']


                        N = len(df)  # number of events for this pair in this segment

                        list_start_stations =[pair[0]] * N
                        list_end_stations =[pair[1]] * N

                        df['source'] = list_start_stations
                        df['target'] = list_end_stations

                        df_all=pd.concat([df_all, df], ignore_index=True)

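                # Empirically realised rate per segment: fraction of non-zero draws,
                # averaged over all pairs of this group pair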
                for d in range(0,num_segments):
                    lamda_arr_act[k,g,d] = tot_count[d]/(NO_SAMPLES*len(comb))
                    # print(tot_count[d])
    ## Other preparations

    # Remove self loops
    df_all = df_all[((df_all['source'] ) != (df_all['target']))]
    # sort by timestamp
    df_all = df_all.sort_values('timestamp')
    df_all = df_all[['target', 'timestamp', 'source']]

    # Save as .csv file
    # df_all.to_csv('./Data/synthetic_ground_truth_g1.csv')

    df=df_all
    dest_folder='./Results/synthetic/3'
    t_df = df['timestamp']

    nodes_arr = np.union1d(df['target'],df['source']).astype(int)
    # list of nodes
    nodes = nodes_arr.tolist()
    num_vertices = len(nodes)

    # node-group dictionary (ground-truth assignment)
    group_dic = dict(zip(nodes, list(group_assignment)))


    # create a new dictionary - key: node-pair , value:  list of timestamps
    dic=df.groupby(['source','target'])['timestamp'].apply(list).to_dict()
    # print('{} {} {} '.format(group_dic, lamda_arr_act,change_points_arr))


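    # Order each edge so that source <= target (treat edges as undirected)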
    def _swap (row):
        if row['source'] > row['target']:
            row['source'] , row['target'] =row['target'] , row['source']
        return row

    # Undirected graph
    df=df.apply(lambda row: _swap(row), axis=1)
    #scale timestamps for zeroth reference point
    refValue = df['timestamp'].min()
    df['timestamp'] -= refValue

    # Experiment
    import experiment

    # User parameters
    num_roles=2
    # num_segments=4
    # num_levels=2# Optional arg
    algo_ver=3
    dest_folder='./Results/synthetic/'

    # tuning parameters
    theta = 1e-7
    eta = 1
    tuning_params= {'theta':theta,'eta':eta}


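    # Run the segmentation experiment; execute() returns
    # [itr_d, likelihood_d, group_dic_d, lambda_estimates_d, change_points_arr_d]:
    # iteration count, likelihood, discovered groups, rate estimates, and discovered change points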
    exp_obj = experiment.Experiment(df,num_roles,num_segments,algo_ver,dest_folder,tuning_params,num_levels,refValue)
    [itr_d,likelihood_d,group_dic_d,lambda_estimates_d,change_points_arr_d] = exp_obj.execute()


    t_df = sorted(t_df)

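    # Label every event with its segment under the discovered change points
    # (change points of role pair (0,0) are used as the reference)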
    chg_points =  change_points_arr_d[0,0,:]
    ranges_arr = [ [chg_points[s]+1,chg_points[s+1]] for s in range(0,len(chg_points)-1)]
    ranges_arr[0][0] = 0
    list_time_stamps  = list(t_df)


    # iterate over timestamps list
    dis_arr = list()
    gt_arr = list()


    for item in list_time_stamps:

        # find the segment to which the timestamp belongs
        # (depends on which groups the two nodes belong to)
        d =  sbm_core._findSegment(ranges_arr, len(ranges_arr) , int(item))
        dis_arr.append(d)


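    # Repeat the labelling with the ground-truth change points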
    chg_points =  change_points_arr[0,0,:]
    ranges_arr = [ [chg_points[s]+1,chg_points[s+1]] for s in range(0,len(chg_points)-1)]
    ranges_arr[0][0] = 0
    list_time_stamps  = list(t_df)


    # iterate over timestamps list

    for item in list_time_stamps:

        # find the segment to which the timestamp belongs
        # (depends on which groups the two nodes belong to)
        d =  sbm_core._findSegment(ranges_arr, len(ranges_arr) , int(item))
        gt_arr.append(d)


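    # Adjusted Rand index between ground-truth and discovered segment labels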
    ind = adjusted_rand_score(gt_arr,dis_arr)
    # print('rand index: seg {} : {}'.format(_itr, ind))

    # Compare the discovered group assignment against the ground truth
    # (both dictionaries are assumed to be keyed by node id)
    nodes_common = list(group_dic_d.keys())
    ds = [group_dic_d[n] for n in nodes_common]   # discovered groups
    gt1 = [group_dic[n] for n in nodes_common]    # ground-truth groups

    ind_grp = adjusted_rand_score(gt1, ds)
    # print('rand index: group {} : {}'.format(_itr, ind_grp))

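    # Per-run results: [segment ARI, iteration count, group ARI]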
    results[itr_no][0] = ind
    results[itr_no][1] = itr_d
    results[itr_no][2] = ind_grp

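# Persist the results of all 50 runs to a pickle file named after _itr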
import pickle
pickle.dump(results, open('large-fixed-file-{}.pickle'.format(_itr), 'wb'))

arr = results
# Column-wise averages over the 50 runs
ll_avg_val = arr.mean(axis=0)

# FINAL RESULTS

print(ll_avg_val)       # average [segment ARI, iteration count, group ARI]
print(max(arr[:,0]))    # best segment ARI
print(min(arr[:,0]))    # worst segment ARI

print(max(arr[:,1]))    # largest iteration count (itr_d)
print(min(arr[:,1]))    # smallest iteration count (itr_d)