Project: discus / pydtnsim
Commit 44579cfb, authored 2 years ago by ArktikHunter
updated to run on python 3.10

Parent: 4e7db60a
No related branches found. No related tags found.
1 merge request: !21 pydtn agkmeans and version 1.0
Pipeline #10647 failed (2 years ago), stage: test
Changes: 2, Pipelines: 1
Showing 2 changed files with 38 additions and 12 deletions:
examples/shed.py: 30 additions, 4 deletions
util/shed_process: 8 additions, 8 deletions
examples/shed.py  +30 −4  (view file @ 44579cfb)
"""
Example to run a batch of simlations on SHED data.
"""
__author__
=
'
Jarrod Pas <j.pas@usask.ca>
'
__author
s
__
=
'
Jarrod Pas <j.pas@usask.ca>
, Hunter McConnell <hunter.mcconnell@usask.ca>
'
import
os
import
sys
import
csv
from
argparse
import
ArgumentParser
from
collections
import
namedtuple
from
multiprocessing
import
Pool
...
...
@@ -68,7 +70,7 @@ def main(args):
    trace = args['shed']

    node_types = [
        Node,
        Node,  # direct delivery
        EpidemicNode,
        BubbleKCliqueNode,
        HCBFKCliqueNode,
...
@@ -77,12 +79,35 @@ def main(args):
    ]

    for seed in args['seeds']:
        if seed is not None:
            seed = seed[0]  # hacky, gets rid of TypeError: unhashable type: 'list'
        for node_type in node_types:
            sim = Simulation(trace=trace, node_type=node_type, seed=seed)
            simulations.append(sim)

    results = {}
    for stats in pool.imap_unordered(run_simulation, simulations):
        log(stats)
        if not args['quiet']:
            log(stats)
        type = stats['node_type']
        if type not in results:
            results[type] = []
        results[type].append(stats)

    # find unused filename
    i = 0
    while os.path.exists(f"results{i}.csv"):
        i += 1

    # dump sim stats in csv
    with open(f"results{i}.csv", 'w', newline='') as results_file:
        for node_type in results:
            fieldnames = results[node_type][0].keys()
            writer = csv.DictWriter(results_file, fieldnames=fieldnames)
            writer.writeheader()
            for result in results[node_type]:
                writer.writerow(result)


def parse_args(args):
...
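Since writer.writeheader() runs inside the per-node-type loop above, the resulting results{i}.csv contains one header row per node type section rather than a single header. A minimal, self-contained sketch of that output shape, using made-up stats dicts (field names are hypothetical) in place of the real simulation results:

    import csv
    import io

    # stand-ins for the stats dicts collected from pool.imap_unordered
    results = {
        'EpidemicNode': [{'node_type': 'EpidemicNode', 'delivery_ratio': 0.91}],
        'Node': [{'node_type': 'Node', 'delivery_ratio': 0.42}],
    }

    out = io.StringIO()
    for node_type in results:
        fieldnames = results[node_type][0].keys()
        writer = csv.DictWriter(out, fieldnames=fieldnames)
        writer.writeheader()                      # a header row is written for every node type section
        for result in results[node_type]:
            writer.writerow(result)

    print(out.getvalue())
    # node_type,delivery_ratio
    # EpidemicNode,0.91
    # node_type,delivery_ratio
    # Node,0.42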
@@ -91,8 +116,9 @@ def parse_args(args):
    parser.add_argument('shed')
    parser.add_argument('--pretty', action='store_true')
    parser.add_argument('--quiet', '-q', action='store_true')
    parser.add_argument('--seeds', '-s', metavar='SEED', type=int, nargs='+', default=[None])
                        action='append', metavar='SEED', type=int, nargs='+', default=[None])

    args = parser.parse_args(args)
    return vars(args)
...
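The two variants of the --seeds declaration shown in this hunk differ in whether action='append' is combined with nargs='+'. With that combination, each -s occurrence appends a whole list, so the parsed value is a list of lists; iterating over it yields lists, which are unhashable, matching the TypeError noted next to the seed = seed[0] work-around in main(). A small sketch of just that argparse behaviour (not shed.py itself):

    from argparse import ArgumentParser

    parser = ArgumentParser()
    # append + nargs='+': every -s occurrence contributes a list of ints
    parser.add_argument('--seeds', '-s', action='append',
                        metavar='SEED', type=int, nargs='+')

    args = parser.parse_args(['-s', '1', '-s', '2'])
    print(args.seeds)    # [[1], [2]] -- a list of lists, not a list of ints

    for seed in args.seeds:
        seed = seed[0]   # unwrap, mirroring the work-around in shed.py's main()
        print(seed)      # 1, then 2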
util/shed_process  +8 −8  (view file @ 44579cfb)
...
@@ -21,32 +21,32 @@ def main(args):
        seen = row[args['seen_column']]

        if time >= 0:
            seers.add(seer)
            observations.add((time, seer, seen))
            observations.add((time, seer, seen))  # set of all meetings logged

    graph = nx.Graph()
    for time, seer, seen in observations:
        if seen not in seers:
        if seen not in seers:  # filters out external nodes? (nodes not participating in study)
            continue
        if not graph.has_edge(seer, seen):
            graph.add_edge(seer, seen, {'times': set()})
        graph[seer][seen]['times'].add(time)
        graph[seer][seen]['times'].add(time)  # edge contains a set of every cycle in which at least one of these two nodes saw the other

    nodes = max(nx.connected_components(graph), key=len)
    nodes = max(nx.connected_components(graph), key=len)  # in taking the largest connected component, filters out isolated nodes?
    nodes = {node: index for index, node in enumerate(nodes)}

    contacts = []
    for node_a, node_b, times in graph.edges(nbunch=nodes, data='times'):
    for node_a, node_b, times in graph.edges(nbunch=nodes, data='times'):  # again, filters isolated nodes
        times = sorted(times)
        node_a, node_b = nodes[node_a], nodes[node_b]
        for _, group in groupby(enumerate(times), lambda p: p[0] - p[1]):
            contact = list(map(lambda g: g[1], group))
        for _, group in groupby(enumerate(times), lambda p: p[0] - p[1]):  # not sure how, but this block creates the join (true) and leave (false)
            contact = list(map(lambda g: g[1], group))  # at the first duty cycle in which they meet and the last cycle before they do not meet
            contacts.append((contact[0], node_a, node_b, True))
            contacts.append((contact[-1] + 1, node_a, node_b, False))

    contacts.sort(key=lambda c: c[0])
    contacts.sort(key=lambda c: c[0])  # sort contacts by time

    start = contacts[0][0]
    duration = contacts[-1][0] - start
...
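The groupby call above relies on a standard idiom for grouping consecutive integers: within a run of consecutive duty cycles, index minus value is constant, so each group is one uninterrupted contact, and its first and last cycles become the join (True) and leave (False) events. A self-contained sketch with made-up cycle numbers and node indices:

    from itertools import groupby

    times = [3, 4, 5, 9, 10]   # made-up duty cycles in which two nodes saw each other
    node_a, node_b = 0, 1      # hypothetical node indices

    contacts = []
    for _, group in groupby(enumerate(times), lambda p: p[0] - p[1]):
        # index - value is constant across a run of consecutive cycles,
        # so each group is one uninterrupted contact
        contact = [cycle for _, cycle in group]
        contacts.append((contact[0], node_a, node_b, True))        # join at the first cycle of the run
        contacts.append((contact[-1] + 1, node_a, node_b, False))  # leave one cycle after the last

    contacts.sort(key=lambda c: c[0])
    print(contacts)
    # [(3, 0, 1, True), (6, 0, 1, False), (9, 0, 1, True), (11, 0, 1, False)]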