Commit 44579cfb authored by ArktikHunter

updated to run on Python 3.10

parent 4e7db60a
Merge request !21: pydtn agkmeans and version 1.0
Pipeline #10647 failed
"""Example to run a batch of simlations on SHED data."""
__author__ = 'Jarrod Pas <j.pas@usask.ca>'
__authors__ = 'Jarrod Pas <j.pas@usask.ca>, Hunter McConnell <hunter.mcconnell@usask.ca>'
import os
import sys
import csv
from argparse import ArgumentParser
from collections import namedtuple
from multiprocessing import Pool
@@ -68,7 +70,7 @@ def main(args):
trace = args['shed']
node_types = [
Node,
Node, # direct delivery
EpidemicNode,
BubbleKCliqueNode,
HCBFKCliqueNode,
@@ -77,12 +79,35 @@
]
for seed in args['seeds']:
if seed is not None:
seed = seed[0] # --seeds uses action='append' with nargs='+', so each entry is a list; unwrap it to avoid TypeError: unhashable type: 'list'
for node_type in node_types:
sim = Simulation(trace=trace, node_type=node_type, seed=seed)
simulations.append(sim)
results = {}
for stats in pool.imap_unordered(run_simulation, simulations):
log(stats)
if not args['quiet']:
log(stats)
type = stats['node_type']
if type not in results:
results[type] = []
results[type].append(stats)
# find unused filename
i = 0
while os.path.exists(f"results{i}.csv"):
i += 1
# dump sim stats in csv
with open(f"results{i}.csv", 'w', newline='') as results_file:
for node_type in results:
fieldnames = results[node_type][0].keys()
writer = csv.DictWriter(results_file, fieldnames=fieldnames)
writer.writeheader()
for result in results[node_type]:
writer.writerow(result)
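Note that the dump above writes every node type's block, each with its own header row, into the same results file, since different node types may report different stat keys. A minimal sketch of the resulting layout, using hypothetical stats dicts (the field names here are illustrative, not the simulator's actual output):

```python
import csv

# hypothetical per-simulation stats, grouped by node type as in main() above
results = {
    'EpidemicNode': [
        {'node_type': 'EpidemicNode', 'seed': 1, 'delivery_ratio': 0.91},
        {'node_type': 'EpidemicNode', 'seed': 2, 'delivery_ratio': 0.88},
    ],
    'Node': [
        {'node_type': 'Node', 'seed': 1, 'delivery_ratio': 0.42},
    ],
}

with open("results_example.csv", 'w', newline='') as results_file:
    for node_type, stats_list in results.items():
        writer = csv.DictWriter(results_file, fieldnames=stats_list[0].keys())
        writer.writeheader()          # one header row per node-type block
        for row in stats_list:
            writer.writerow(row)

# results_example.csv then holds one header+rows block per node type,
# so a reader has to split on header rows rather than treat it as a single table.
```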
def parse_args(args):
@@ -91,8 +116,9 @@ def parse_args(args):
parser.add_argument('shed')
parser.add_argument('--pretty', action='store_true')
parser.add_argument('--quiet', '-q', action='store_true')
parser.add_argument('--seeds', '-s',
metavar='SEED', type=int, nargs='+', default=[None])
action='append', metavar='SEED', type=int, nargs='+', default=[None])
args = parser.parse_args(args)
return vars(args)
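For context on the seed = seed[0] workaround in main() above: combining action='append' with nargs='+' makes argparse append one list of ints per --seeds occurrence, and append keeps a non-empty default in place, so the [None] default survives as the first element. A minimal sketch of that behaviour, assuming the same parser options:

```python
from argparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument('shed')
parser.add_argument('--seeds', '-s',
                    action='append', metavar='SEED', type=int, nargs='+', default=[None])

# Without --seeds, only the default survives.
print(parser.parse_args(['trace.csv']).seeds)                       # [None]

# Each --seeds occurrence appends a *list* of ints after the [None] default,
# which is why main() unwraps seed[0] before using it.
print(parser.parse_args(['trace.csv', '--seeds', '1', '2']).seeds)  # [None, [1, 2]]
```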
@@ -21,32 +21,32 @@ def main(args):
seen = row[args['seen_column']]
if time >= 0:
seers.add(seer)
observations.add((time, seer, seen))
observations.add((time, seer, seen)) # set of all meetings logged
graph = nx.Graph()
for time, seer, seen in observations:
if seen not in seers:
if seen not in seers: # skip nodes that never act as seers, likely external devices not participating in the study
continue
if not graph.has_edge(seer, seen):
graph.add_edge(seer, seen, {'times': set()})
graph[seer][seen]['times'].add(time)
graph[seer][seen]['times'].add(time) # each edge stores every duty cycle in which at least one of the two nodes saw the other
nodes = max(nx.connected_components(graph), key=len)
nodes = max(nx.connected_components(graph), key=len) # keep only the largest connected component, dropping isolated nodes and small disconnected clusters
nodes = {node: index for index, node in enumerate(nodes)}
contacts = []
for node_a, node_b, times in graph.edges(nbunch=nodes, data='times'):
for node_a, node_b, times in graph.edges(nbunch=nodes, data='times'): # only edges incident to the largest connected component
times = sorted(times)
node_a, node_b = nodes[node_a], nodes[node_b]
for _, group in groupby(enumerate(times), lambda p: p[0]-p[1]):
contact = list(map(lambda g: g[1], group))
for _, group in groupby(enumerate(times), lambda p: p[0]-p[1]): # index minus value is constant over a run of consecutive cycles, so each group is one continuous contact
contact = list(map(lambda g: g[1], group)) # cycles of that contact: join at the first cycle, leave one cycle after the last
contacts.append((contact[0], node_a, node_b, True))
contacts.append((contact[-1] + 1, node_a, node_b, False))
contacts.sort(key=lambda c: c[0])
contacts.sort(key=lambda c: c[0]) # sort contacts by time
start = contacts[0][0]
duration = contacts[-1][0] - start
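To unpack the groupby idiom flagged in the comments above: for a sorted run of consecutive integers, index minus value is constant, so grouping enumerate(times) by that difference splits the sorted duty cycles into maximal consecutive runs; each run then yields a join event at its first cycle and a leave event one cycle after its last. A small standalone sketch with made-up cycle numbers:

```python
from itertools import groupby

times = sorted({3, 4, 5, 9, 10, 14})  # duty cycles in which two nodes saw each other

contacts = []
for _, group in groupby(enumerate(times), lambda p: p[0] - p[1]):
    run = [t for _, t in group]            # one maximal run of consecutive cycles
    contacts.append((run[0], True))        # join at the first cycle of the run
    contacts.append((run[-1] + 1, False))  # leave one cycle after the last

print(contacts)
# [(3, True), (6, False), (9, True), (11, False), (14, True), (15, False)]
```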