New repository containing only code and no report, supporting files, or files copyrighted like the project specification
Peter 2022-10-24 21:04:39 +08:00
commit 522671e4c2
18 changed files with 1890 additions and 0 deletions

493
.gitignore vendored Normal file
@@ -0,0 +1,493 @@
test/
ZZZ_ignore/
snapshots/
*.bin
# Compiled class file
*.class
# Log file
*.log
# BlueJ files
*.ctxt
# Mobile Tools for Java (J2ME)
.mtj.tmp/
# Package Files #
*.jar
*.war
*.nar
*.ear
*.zip
*.tar.gz
*.rar
# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
hs_err_pid*
~.*.docx
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
## Core latex/pdflatex auxiliary files:
*.aux
*.lof
*.log
*.lot
*.fls
*.out
*.toc
*.fmt
*.fot
*.cb
*.cb2
.*.lb
## Intermediate documents:
*.dvi
*.xdv
*-converted-to.*
# these rules might exclude image files for figures etc.
# *.ps
# *.eps
# *.pdf
## Generated if empty string is given at "Please type another file name for output:"
.pdf
## Bibliography auxiliary files (bibtex/biblatex/biber):
*.bbl
*.bcf
*.blg
*-blx.aux
*-blx.bib
*.run.xml
## Build tool auxiliary files:
*.fdb_latexmk
*.synctex
*.synctex(busy)
*.synctex.gz
*.synctex.gz(busy)
*.pdfsync
## Build tool directories for auxiliary files
# latexrun
latex.out/
## Auxiliary and intermediate files from other packages:
# algorithms
*.alg
*.loa
# achemso
acs-*.bib
# amsthm
*.thm
# beamer
*.nav
*.pre
*.snm
*.vrb
# changes
*.soc
# comment
*.cut
# cprotect
*.cpt
# elsarticle (documentclass of Elsevier journals)
*.spl
# endnotes
*.ent
# fixme
*.lox
# feynmf/feynmp
*.mf
*.mp
*.t[1-9]
*.t[1-9][0-9]
*.tfm
#(r)(e)ledmac/(r)(e)ledpar
*.end
*.?end
*.[1-9]
*.[1-9][0-9]
*.[1-9][0-9][0-9]
*.[1-9]R
*.[1-9][0-9]R
*.[1-9][0-9][0-9]R
*.eledsec[1-9]
*.eledsec[1-9]R
*.eledsec[1-9][0-9]
*.eledsec[1-9][0-9]R
*.eledsec[1-9][0-9][0-9]
*.eledsec[1-9][0-9][0-9]R
# glossaries
*.acn
*.acr
*.glg
*.glo
*.gls
*.glsdefs
*.lzo
*.lzs
*.slg
*.slo
*.sls
# uncomment this for glossaries-extra (will ignore makeindex's style files!)
# *.ist
# gnuplot
*.gnuplot
*.table
# gnuplottex
*-gnuplottex-*
# gregoriotex
*.gaux
*.glog
*.gtex
# htlatex
*.4ct
*.4tc
*.idv
*.lg
*.trc
*.xref
# hyperref
*.brf
# knitr
*-concordance.tex
# TODO Uncomment the next line if you use knitr and want to ignore its generated tikz files
# *.tikz
*-tikzDictionary
# listings
*.lol
# luatexja-ruby
*.ltjruby
# makeidx
*.idx
*.ilg
*.ind
# minitoc
*.maf
*.mlf
*.mlt
*.mtc[0-9]*
*.slf[0-9]*
*.slt[0-9]*
*.stc[0-9]*
# minted
_minted*
*.pyg
# morewrites
*.mw
# newpax
*.newpax
# nomencl
*.nlg
*.nlo
*.nls
# pax
*.pax
# pdfpcnotes
*.pdfpc
# sagetex
*.sagetex.sage
*.sagetex.py
*.sagetex.scmd
# scrwfile
*.wrt
# svg
svg-inkscape/
# sympy
*.sout
*.sympy
sympy-plots-for-*.tex/
# pdfcomment
*.upa
*.upb
# pythontex
*.pytxcode
pythontex-files-*/
# tcolorbox
*.listing
# thmtools
*.loe
# TikZ & PGF
*.dpth
*.md5
*.auxlock
# titletoc
*.ptc
# todonotes
*.tdo
# vhistory
*.hst
*.ver
# easy-todo
*.lod
# xcolor
*.xcp
# xmpincl
*.xmpi
# xindy
*.xdy
# xypic precompiled matrices and outlines
*.xyc
*.xyd
# endfloat
*.ttt
*.fff
# Latexian
TSWLatexianTemp*
## Editors:
# WinEdt
*.bak
*.sav
# Texpad
.texpadtmp
# LyX
*.lyx~
# Kile
*.backup
# gummi
.*.swp
# KBibTeX
*~[0-9]*
# TeXnicCenter
*.tps
# auto folder when using emacs and auctex
./auto/*
*.el
# expex forward references with \gathertags
*-tags.tex
# standalone packages
*.sta
# Makeindex log files
*.lpz
# xwatermark package
*.xwm
# REVTeX puts footnotes in the bibliography by default, unless the nofootinbib
# option is specified. Footnotes are the stored in a file with suffix Notes.bib.
# Uncomment the next line to have this generated file ignored.
#*Notes.bib

78
agents/abstract_influencer_agent.py Normal file
@@ -0,0 +1,78 @@
from abc import ABC, abstractmethod
import pickle
import shelve
from typing import Dict, List, Tuple, Union
import etc.gamestate as gs
from etc.messages import MESSAGE_UNDEFINED, Message
from etc.util import NoCopyShelf, RecursiveDict
from agents.action_state import ActionState
class AbstractInfluencerAgent(ABC):
last_message: Message
# Number of red opinions [int]
# Number of blue opinions [int]
# Number of red followers [int]
# Blue energy [int - convert from continuous float to discrete]
state_action_lut: Dict[int, Dict[int, Dict[int, Dict[int, Dict[int, List[Union[int, float]]]]]]]
__state_action_lut: NoCopyShelf
short_term_mem: List[ActionState]
def __init__(self, state_action_lut_path: str) -> None:
self.last_message = MESSAGE_UNDEFINED
self.__state_action_lut = NoCopyShelf.open(
state_action_lut_path,
protocol=pickle.HIGHEST_PROTOCOL,
writeback=True # Yes this makes it less performant, but it will be more readable.
)
# Create data for new shelve
try:
self.__state_action_lut["data"]
except KeyError:
self.__state_action_lut["data"] = RecursiveDict()
self.state_action_lut = self.__state_action_lut["data"]
self.short_term_mem = []
@abstractmethod
def influence(self, state: "gs.GameState", *args, **kwargs) -> None:
pass
@abstractmethod
def smart_influence(self, state: "gs.GameState") -> None:
pass
@staticmethod
@abstractmethod
def choices() -> List[Message]:
pass
@abstractmethod
def update_short_term_mem(self, old_state: "gs.GameState", resulting_state: "gs.GameState") -> None:
pass
@abstractmethod
def update_lut(self, terminal_state: "gs.GameState") -> None:
pass
def __update_lut_rating__(self, action_state: ActionState, rating: float) -> None:
# Number of red opinions [int]
# Number of blue opinions [int]
# Number of red followers [int]
# Blue energy [int - convert from continuous float to discrete]
# Action
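# Each LUT leaf stores a running [sample_count, cumulative_rating] list for the chosen action id.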
previous_state_ratings: Dict[int, List[Union[int, float]]] = self.state_action_lut[action_state.n_red_opinion_bin][action_state.n_blue_opinion_bin][
action_state.n_red_followers_bin][action_state.blue_energy_bin]
action = action_state.action.id
if not isinstance(previous_state_ratings[action], list):
previous_state_ratings[action] = [0, 0.0]
previous_state_ratings[action][0] += 1
previous_state_ratings[action][1] += rating
def close_lut(self) -> None:
self.__state_action_lut.close()
def sync_lut(self) -> None:
self.__state_action_lut.sync()

75
agents/action_state.py Normal file
@@ -0,0 +1,75 @@
from typing import Union
from etc.messages import Message
import etc.gamestate as gs
# Keep the following in short-term memory, for each potency option used.
# STATE VARIABLES
# Number of red opinions [int]
# Number of blue opinions [int]
# Number of red followers [int]
# Blue energy [int - convert from continuous float to discrete]
#
# PARAMETERS IN CALCULATION
# Round number
# Change in red opinion
# Change in blue opinion
# Change in red followers
# Change in blue energy
# Used to update state_action_lut at end of game by the heuristic,
# For blue:
# ((Change in blue opinions) - (Change in red opinions) - (Change in Red followers) - (Change in blue energy)) * (Rounds to win)
# Red has the opposite heuristic
# There, learning. Are ya happy?????
class ActionState:
BINS = 5
# STATE (digitized from continuous)
n_blue_opinion_bin: int
n_red_opinion_bin: int
n_red_followers_bin: int
blue_energy_bin: int
iteration: int
# ACTION
action: Message
# RESULTING STATE
change_n_blue_opinion: int
change_n_red_opinion: int
change_n_red_followers: int
change_blue_energy: int
# Assume value is between 0 and range.
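# e.g. with bins=5 and range=100: bin(37, 5, 100) -> 1 and bin(100, 5, 100) -> 4, so continuous values are digitized into indices 0..bins-1.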
@staticmethod
def bin(value: Union[float, int], bins: int, range: Union[float, int]) -> int:
clamp_value = max(min(value, range), 0)
return int(clamp_value / (range / (bins - 1)))
def __init__(self, action: Message, start_state: "gs.GameState", next_state: "gs.GameState") -> None:
start_n_red_opinion, start_n_blue_opinion = start_state.count_majority()
next_n_red_opinion, next_n_blue_opinion = next_state.count_majority()
green_population = start_state.n_green_agents
max_energy = start_state.blue_agent.initial_energy
# STATE
self.n_red_opinion_bin = ActionState.bin(start_n_red_opinion, ActionState.BINS, green_population)
self.n_blue_opinion_bin = ActionState.bin(start_n_blue_opinion, ActionState.BINS, green_population)
self.n_red_followers_bin = ActionState.bin(start_state.red_agent.red_followers, ActionState.BINS, green_population)
self.blue_energy_bin = ActionState.bin(start_state.blue_agent.blue_energy, ActionState.BINS, max_energy)
# ACTION
self.iteration = start_state.iteration
self.action = action
self.change_n_blue_opinion = next_n_blue_opinion - start_n_blue_opinion
self.change_n_red_opinion = next_n_red_opinion - start_n_red_opinion
self.change_n_red_followers = next_state.red_agent.red_followers - start_state.red_agent.red_followers
self.change_blue_energy = next_state.blue_agent.blue_energy - start_state.blue_agent.blue_energy
pass
# Relative to the blue agent - invert for red agent.
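# The rating weighs the net change caused in that round (local_effect) by how many rounds remained until the game ended, and flips sign when the game was lost.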
def rate_state_action(self, round_end: int, lost: bool) -> float:
local_effect = self.change_n_blue_opinion + self.change_n_red_opinion + self.change_blue_energy + self.change_n_red_followers
return (-1 if lost else 1) * (round_end - self.iteration) * local_effect

140
agents/blue.py Normal file
@@ -0,0 +1,140 @@
import math
from random import Random
import statistics
from typing import List, Tuple
import etc.gamestate as gs
from agents.green import GreenAgent
from agents.gray import GrayAgent
from etc.messages import BLUE_MESSAGES, MESSAGE_BLUE_SPY, MESSAGE_UNDEFINED, Message, Opinion
from agents.abstract_influencer_agent import AbstractInfluencerAgent
from etc.util import RecursiveDict
from agents.action_state import ActionState
from play_config import BLUE_AGENT_LUT_PATH
class BlueAgent(AbstractInfluencerAgent):
# Energy level for blue team
blue_energy: float
initial_energy: float
blue_count: int
rand: Random
# Learn from gray agents
gray_mem: List[Opinion]
# Number of red opinions [int]
# Number of blue opinions [int]
# Number of red followers [int]
# Blue energy [int - convert from continuous float to discrete]
# Action
def __init__(self, initial_energy: float, rand: Random) -> None:
super().__init__(BLUE_AGENT_LUT_PATH)
self.blue_energy = initial_energy
self.initial_energy = initial_energy
self.blue_count = 0
self.rand = rand
self.gray_mem = [Opinion.BLUE, Opinion.RED] # Assume 0.5 probability of spy initially.
# > For a blue agent, the options will consist of a) - 10 correction messages (Please
# > come up with some fictitious messages), uncertainty value and associated energy loss
#
# > blue team can push a counter-narrative and interact with green team members.
# > However, if they invest too much by interacting with a high certainty,
#
# > blue team is changing the opinion of the green team members
# Interpreting this as blue team being able to affect the uncertainty of green
# nodes, NOT as being able to directly switch them to blue by calling
# person.attempt_switch(Opinion.BLUE).
# It is stated that blue can "interact with green team members", so I am
# interpreting this as interacting with/influencing ALL green members to vote,
# not just those with a certain opinion. To compensate, the potency of blue
# messages is halved.
@staticmethod
def blue_action(person: GreenAgent, message: Message) -> None:
if person.polarity == Opinion.RED:
person.uncertainty += message.potency
# > the blue team is changing the opinion of the green team members. Blue team
# > also has an option to let a grey agent in the green network. That agent
# > can be thought of as a life line, where blue team gets another chance of
# > interaction without losing “energy”. However, the grey agent can be a
# > spy from the red team
# So blue team can directly attempt to make people's opinions switch at the
# cost of fuel (Unlike red team, which can only decrease red people's certainty
# but not directly cause people to switch to red team), or use a spy.
def influence(self, state: "gs.GameState", message: Message) -> None:
self.last_message = message
if message == MESSAGE_BLUE_SPY:
gray: GrayAgent = self.rand.choice(state.gray_agents)
gray.gray_action(state)
# Remember the action
self.gray_mem.append(gray.polarity)
else:
for person in state.green_agents.nodes(data=False):
self.blue_energy -= message.cost
obj_person: GreenAgent = state.get_green_agent(person)
BlueAgent.blue_action(obj_person, message)
@staticmethod
def choices() -> List[Message]:
choices = list(BLUE_MESSAGES)
choices.append(MESSAGE_BLUE_SPY)
return choices
def gray_mean(self) -> float:
return statistics.mean([1 if x == Opinion.BLUE else 0 for x in self.gray_mem])
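# Play the gray spy with probability equal to the observed fraction of past gray agents that turned out to be friendly (blue).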
def choose_gray(self) -> bool:
return self.rand.uniform(0.0, 1.0) < self.gray_mean()
# Random process - used to balance the game
def dumb_influence(self, state: "gs.GameState") -> None:
self.influence(state, state.rand.choice(BlueAgent.choices()))
def smart_influence(self, state: "gs.GameState") -> None:
if self.choose_gray():
self.influence(state, MESSAGE_BLUE_SPY)
return
red_opinion, blue_opinion = state.count_majority()
n_red_opinion_bin = ActionState.bin(red_opinion, ActionState.BINS, state.n_green_agents)
n_blue_opinion_bin = ActionState.bin(blue_opinion, ActionState.BINS, state.n_green_agents)
n_red_followers_bin = ActionState.bin(state.red_agent.red_followers, ActionState.BINS, state.n_green_agents)
blue_energy_bin = ActionState.bin(state.blue_agent.blue_energy, ActionState.BINS, state.blue_agent.initial_energy)
previous_rating: RecursiveDict = self.state_action_lut[n_red_opinion_bin][n_blue_opinion_bin][
n_red_followers_bin][blue_energy_bin]
n_samples = 1
largest_rating = -math.inf
largest_action = -1
for option in previous_rating:
option_rating_fractional = previous_rating[option]
rating = option_rating_fractional[1] / option_rating_fractional[0] # Average
if rating > largest_rating:
largest_rating = rating
largest_action = option
n_samples += option_rating_fractional[0]
message: Message
# Use 1/sqrt(x)
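# Exploration decays with experience: after n_samples visits to this state a random message is tried with probability 1/sqrt(n_samples), otherwise the best-rated message so far is exploited.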
if self.rand.uniform(0.0, 1.0) < 1 / math.sqrt(n_samples):
message = state.rand.choice(BLUE_MESSAGES)
else:
message = BLUE_MESSAGES[largest_action]
self.influence(state, message)
def update_lut(self, terminal_state: "gs.GameState") -> None:
blue_winner = terminal_state.winner == gs.Winner.BLUE_WIN or terminal_state.winner == gs.Winner.RED_NO_FOLLOWERS
for action_state in self.short_term_mem:
rating = action_state.rate_state_action(terminal_state.iteration, blue_winner)
self.__update_lut_rating__(action_state, rating)
def update_short_term_mem(self, start_state: "gs.GameState", next_state: "gs.GameState") -> None:
if self.last_message != MESSAGE_BLUE_SPY:
self.short_term_mem.append(ActionState(self.last_message, start_state, next_state))

39
agents/gray.py Normal file
@@ -0,0 +1,39 @@
from random import Random
from typing import List
import agents.blue as blue
from agents.green import GreenAgent
import agents.red as red
from etc.messages import Opinion
import etc.gamestate as gs
from etc.messages import Message, BLUE_MESSAGES, RED_MESSAGES
class GrayAgent:
polarity: Opinion
weights: List[float]
rand: Random
def __init__(self, polarity: Opinion, rand: Random) -> None:
self.polarity = polarity
self.rand = rand
# Default to uniform weights over the message levels (an assumption; choose_message() requires weights to be set).
self.weights = [1.0] * len(BLUE_MESSAGES)
def choose_message(self) -> Message:
if self.polarity == Opinion.BLUE:
return self.rand.choices(BLUE_MESSAGES, weights=self.weights, k=1)[0]
else:
return self.rand.choices(RED_MESSAGES, weights=self.weights, k=1)[0]
def gray_action(self, state: "gs.GameState"):
# TODO: There is no reason for gray not to play the highest potency message
# as it has no penalty for playing high potency messages.
if self.polarity == Opinion.BLUE:
message: Message = BLUE_MESSAGES[4]
for person in state.green_agents.nodes(data=False):
obj_person: GreenAgent = state.green_agents.nodes[person]['data']
blue.BlueAgent.blue_action(obj_person, message)
else:
message: Message = RED_MESSAGES[4]
for person in state.green_agents.nodes(data=False):
obj_person: GreenAgent = state.green_agents.nodes[person]['data']
red.RedAgent.red_action(obj_person, message)

42
agents/green.py Normal file
@@ -0,0 +1,42 @@
from random import Random
from typing import Tuple
from etc.custom_float import Uncertainty
from etc.messages import Opinion
OPINION_INFLUENCE = 0.2
class GreenAgent:
uncertainty: Uncertainty
polarity: Opinion
following_red: bool
rand: Random
def __init__(self, uncertainty: float, polarity: Opinion, rand: Random) -> None:
self.uncertainty = uncertainty
self.polarity = polarity
self.following_red = True
self.rand = rand
def attempt_swap(self):
# > To make it simple, the more positive a value is the more uncertain the
# > agent is and the more negative the value is the more certain the agent is
# Source: Help3001
if self.rand.uniform(Uncertainty.UNCERTAINTY_MIN, Uncertainty.UNCERTAINTY_MAX) < self.uncertainty:
self.polarity = Opinion.opposite(self.polarity)
def attempt_switch(self, polarity: Opinion) -> Opinion:
# > To make it simple, the more positive a value is the more uncertain the
# > agent is and the more negative the value is the more certain the agent is
# Source: Help3001
if self.rand.uniform(Uncertainty.UNCERTAINTY_MIN, Uncertainty.UNCERTAINTY_MAX) < self.uncertainty:
self.polarity = polarity
return polarity
return Opinion.UNDEFINED
def unfollow_red(self):
self.following_red = False
def clone(self) -> "GreenAgent":
return GreenAgent(self.uncertainty.clone(), self.polarity, self.rand)

102
agents/red.py Normal file
@@ -0,0 +1,102 @@
# Game Agents
import math
from random import Random
from typing import List, Tuple
from etc.messages import MESSAGE_UNDEFINED, RED_MESSAGES, Message, Opinion
from agents.green import GreenAgent
import etc.gamestate as gs
from agents.abstract_influencer_agent import AbstractInfluencerAgent
from agents.action_state import ActionState
from etc.util import RecursiveDict
from play_config import RED_AGENT_LUT_PATH
class RedAgent(AbstractInfluencerAgent):
# > A highly potent message may result in losing followers i.e., as compared
# > to the last round fewer green team members will be able to interact with
# > the red team agent
# Assume everyone is a follower at the start
red_followers: int # Number of green agents listening to red team
red_count: int # Number of green agents with Opinion.RED
rand: Random
def __init__(self, initial_followers: int, rand: Random) -> None:
super().__init__(RED_AGENT_LUT_PATH)
self.red_followers = initial_followers
self.red_count = 0
self.rand = rand
# > However, a potent message may decrease the uncertainity of opinion among
# > people who are already under the influence of the red team
@staticmethod
def red_action(person: GreenAgent, message: Message):
if person.following_red and person.polarity == Opinion.RED:
person.uncertainty -= message.potency
def influence(self, state: "gs.GameState", message: Message) -> None:
# No need to deepcopy() since each interaction only affects each person once.
self.last_message = message
for person in state.green_agents.nodes(data=False):
obj_person: GreenAgent = state.get_green_agent(person)
# > A highly potent message may result in losing followers i.e., as compared
# > to the last round fewer green team members will be able to interact
# > with the red team agent
if self.rand.random() < message.cost:
if obj_person.following_red:
obj_person.unfollow_red()
self.red_followers -= 1
# > However, a potent message may decrease the uncertainity of opinion among
# > people who are already under the influence of the red team
else:
RedAgent.red_action(obj_person, message)
# Random process
def dumb_influence(self, state: "gs.GameState") -> None:
self.influence(state, state.rand.choice(RED_MESSAGES))
def smart_influence(self, state: "gs.GameState") -> None:
red_opinion, blue_opinion = state.count_majority()
n_red_opinion_bin = ActionState.bin(red_opinion, ActionState.BINS, state.n_green_agents)
n_blue_opinion_bin = ActionState.bin(blue_opinion, ActionState.BINS, state.n_green_agents)
n_red_followers_bin = ActionState.bin(state.red_agent.red_followers, ActionState.BINS, state.n_green_agents)
blue_energy_bin = ActionState.bin(state.blue_agent.blue_energy, ActionState.BINS, state.blue_agent.initial_energy)
previous_rating: RecursiveDict = self.state_action_lut[n_red_opinion_bin][n_blue_opinion_bin][
n_red_followers_bin][blue_energy_bin]
n_samples = 1
largest_rating = -math.inf
largest_action = 0
for option in previous_rating:
option_rating_fractional = previous_rating[option]
rating = option_rating_fractional[1] / option_rating_fractional[0] # Average
if rating > largest_rating:
largest_rating = rating
largest_action = option
n_samples += option_rating_fractional[0]
message: Message
# Use 1/sqrt(x) - Central-limit theorem
if self.rand.uniform(0.0, 1.0) < 1 / math.sqrt(n_samples):
message = state.rand.choice(RED_MESSAGES)
else:
message = RED_MESSAGES[largest_action]
self.influence(state, message)
@staticmethod
def choices() -> List[Message]:
return list(RED_MESSAGES)
def update_short_term_mem(self, start_state: "gs.GameState", next_state: "gs.GameState") -> None:
self.short_term_mem.append(ActionState(self.last_message, start_state, next_state))
# ((Change in blue opinions) - (Change in red opinions) - (Change in Red followers) - (Change in blue energy)) * (Rounds to win)
# Red has the opposite heuristic
# There, learning. Are ya happy?????
def update_lut(self, terminal_state: "gs.GameState") -> None:
blue_winner = terminal_state.winner == gs.Winner.BLUE_WIN or terminal_state.winner == gs.Winner.RED_NO_FOLLOWERS
for action_state in self.short_term_mem:
# Since this is the red agent, assign negative to blue wins.
rating = -action_state.rate_state_action(terminal_state.iteration, blue_winner)
self.__update_lut_rating__(action_state, rating)

9
agents/state_action.txt Normal file
@@ -0,0 +1,9 @@
State variables:
Number of red opinions
Number of blue opinions
Number of red followers
Blue energy
Actions:
The 5 messages
Evaluation of state-action pair:
((Change in red opinions) - (Change in blue opinions)) * (Rounds to win)

65
etc/custom_float.py Normal file
@@ -0,0 +1,65 @@
# > Sometimes people say that we would like to just model
# > it by 01.
# > Okay, They're very uncertain.
# > Would be zero and very certain would be won.
# > Okay, you can do that.
# > Okay.
# > So it depends upon your understanding.
# > All right?
# > Yes.
# Source: CITS3001 - 13 Sep 2022, 11:00 - Lecture
# Custom float class which clamps at ends
class Uncertainty(float):
UNCERTAINTY_MIN = 0.0
UNCERTAINTY_MAX = 1.0
UNCERTAINTY_THRESHOLD = 0.2 # Threshold at which a person is "convinced" by their opinion and can be counted as either "voting" or "not voting"
def __init__(self, value: float):
float.__init__(value)
if value.imag != 0.0:
raise ValueError("Must be real")
if value > Uncertainty.UNCERTAINTY_MAX or value < Uncertainty.UNCERTAINTY_MIN:
raise ValueError("Outside of range")
@staticmethod
def clamp(__x):
return max(min(__x, Uncertainty.UNCERTAINTY_MAX), Uncertainty.UNCERTAINTY_MIN)
@staticmethod
def short_init(__x) -> "Uncertainty":
return Uncertainty(Uncertainty.clamp(__x))
def __add__(self, __x) -> "Uncertainty":
return Uncertainty.short_init(self.real + __x)
def __sub__(self, __x) -> "Uncertainty":
return Uncertainty.short_init(self.real - __x)
def __rsub__(self, __x) -> "Uncertainty":
return Uncertainty.short_init(__x - self.real)
def __radd__(self, __x) -> "Uncertainty":
return self.__add__(__x)
def __mul__(self, __x) -> "Uncertainty":
return Uncertainty.short_init(self.real * __x)
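# certainty() is the complement of uncertainty within [UNCERTAINTY_MIN, UNCERTAINTY_MAX]; e.g. Uncertainty(0.2).certainty() == 0.8.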
def certainty(self) -> float:
return Uncertainty.UNCERTAINTY_MAX - self.real + Uncertainty.UNCERTAINTY_MIN
def clone(self) -> "Uncertainty":
return Uncertainty(self.real)
# > All right, So what you need to do is that
# > after every interaction, if the opinion changes, then you need
# > to change uncertainty value as well.
# > All right, So, um, and this is the tricky part
# Source: CITS3001 - 13 Sep 2022, 11:00 - Lecture
#
#
# > the project we have just
# > two opinions, and you need to come up with a
# > way to change the uncertainty.
# Source: CITS3001 - 13 Sep 2022, 11:00 - Lecture

296
etc/gamestate.py Normal file
@@ -0,0 +1,296 @@
from enum import Enum
from math import atan, pi
import matplotlib.pyplot as plt
from etc.custom_float import Uncertainty
from etc.messages import Opinion
from copy import deepcopy
from random import Random
from typing import List, Tuple
import networkx as nx
import agents.red as red
import agents.blue as blue
import agents.gray as gray
import agents.green as green
from play_config import DAYS_UNTIL_ELECTION, ENABLE_GRAPHICS, INITIAL_BLUE_ENERGY, INITIAL_UNCERTAINTY_RANGE
class GameState:
iteration: int
winner: "Winner"
iterations_left: int
n_green_agents: int
p_green_agents: float
green_agents: nx.Graph
uncertainty_interval: Tuple[Uncertainty, Uncertainty]
gray_agents: List["gray.GrayAgent"]
red_agent: "red.RedAgent"
blue_agent: "blue.BlueAgent"
rand: Random
# Visualization stuff
graphics: bool
viz = {}
# > At the start of the game, you need an initialise function.
def __init__(self,
green_agents: Tuple[int, float, float],
gray_agents: Tuple[int, float],
uncertainty_interval: Tuple[Uncertainty, Uncertainty],
seed: int, graphics: bool = ENABLE_GRAPHICS) -> None:
"""_summary_
Args:
green_agents (Tuple[int, float, float]): (number of green agents, probability of connection, probability of initial opinion = blue)
gray_agents (Tuple[int, float]): (number of gray agents, probability of red spy)
uncertainty_interval (Tuple[float]): _description_
seed (int): seed for Random in game
"""
self.graphics = graphics
self.iteration = 0
self.winner = Winner.NO_WINNER
self.rand = Random()
self.rand.seed(seed)
self.uncertainty_interval = uncertainty_interval
self.n_green_agents = green_agents[0]
self.p_green_agents = green_agents[1]
self.green_agents = nx.erdos_renyi_graph(self.n_green_agents, self.p_green_agents)
# Generate gray agents
self.gray_agents = []
for x in range(0, int(gray_agents[0] * gray_agents[1])):
self.gray_agents.append(gray.GrayAgent(Opinion.BLUE, self.rand))
for x in range(int(gray_agents[0] * gray_agents[1]), gray_agents[0]):
self.gray_agents.append(gray.GrayAgent(Opinion.RED, self.rand))
# > You have to assign one or the other opinion to green.
# > Percentage of agents (green) who want to vote in the election, at the start of the [From pdf]
# > The other variable is uncertainity, for your given interval
# > (say it was -0.5, 0.5), you need to generate a sequence of random
# > numbers and assign and then assign green agents a number from that
# > sequence. For say m green agents, you need m numbers, and there
# > can be duplicates!
# Negative numbers can still be "more positive" than lower negative numbers...
# so translating the interval should have NO EFFECT on the probabilities
# So we can assume that the uncertainty is in [-1.0,1.0] but this
# uncertainty interval is only for initialization stage?
for person in self.green_agents.nodes(data=False):
uncertainty: Uncertainty = Uncertainty(self.rand.uniform(uncertainty_interval[0], uncertainty_interval[1]))
polarity = Opinion.RED
if self.rand.random() < green_agents[2]:
polarity = Opinion.BLUE
self.green_agents.nodes[person]['data'] = green.GreenAgent(uncertainty, polarity, self.rand)
self.blue_agent = blue.BlueAgent(INITIAL_BLUE_ENERGY, self.rand)
self.red_agent = red.RedAgent(self.n_green_agents, self.rand)
self.red_agent.red_count, self.blue_agent.blue_count = self.count_majority()
# Visualization
self.viz['pos'] = nx.nx_pydot.graphviz_layout(self.green_agents, prog="fdp")
self.iterations_left = DAYS_UNTIL_ELECTION
@staticmethod
def short_init(green_agents: Tuple[int, float, float], gray_agents: Tuple[int, float]) -> "GameState":
return GameState(green_agents, gray_agents, INITIAL_UNCERTAINTY_RANGE, None)
def get_green_agent(self, person: int) -> green.GreenAgent:
return self.green_agents.nodes[person]['data']
def close(self) -> None:
self.blue_agent.update_lut(self)
self.blue_agent.close_lut()
self.red_agent.update_lut(self)
self.red_agent.close_lut()
def green_round(self) -> None:
next_green_graph: nx.Graph = deepcopy(self.green_agents)
for person in self.green_agents.nodes(data=False):
for connection in self.green_agents.neighbors(person):
# Test nodes
obj_person: "green.GreenAgent" = self.get_green_agent(person).clone()
obj_connection: "green.GreenAgent" = self.get_green_agent(connection).clone()
interaction_result = Opinion.UNDEFINED # = obj_person.interact(obj_connection)
# > green team's turn: all agents who have a connection between them
# > can can have an interaction, the one who is more certain about
# > their opinion may affect the other green.
# Source: Help3001
# > This equation is for this particular paper because they have
# > the opinion on that.
# > The opinions are a real number, okay?
# > And they are on a scale of minus one to
# > plus one
# > for your project.
# > You will also have to write an equation, you have
# > to come up with your own equation.
# > Okay, so that is the part of your project.
# > That equation does not have to be an overly complicated
# > equation.
# > Could be a really small equation.
# > Uh, simple equation.
# > You can you could be updating.
# > Um, uh, the uncertainties by a simple, uh, you know,
# > a small value.
# > Okay, so just think over it.
# > How would you like to do that?
# Source: CITS3001 - 13 Sep 2022, 11:00 - Lecture
# For context:
# > Now, in the project, you had, like, two opinions.
# > Okay, uh, to vote or not to vote.
# Source: CITS3001 - 13 Sep 2022, 11:00 - Lecture
next_: green.GreenAgent
if obj_person.uncertainty < obj_connection.uncertainty:
# Switch the other agent
# next_connection = next_state.green_agents.nodes[connection]['data']
next_connection: green.GreenAgent = next_green_graph.nodes[connection]['data']
interaction_result = next_connection.attempt_switch(obj_person.polarity)
# Rationale: The more certain an agent is the better they are at arguing,
# which causes the target to be more certain about their decision to switch,
# although not completely uncertain since they just switched.
if not interaction_result == Opinion.UNDEFINED:
next_connection.uncertainty -= obj_person.uncertainty.certainty() * green.OPINION_INFLUENCE
else:
# Switch self
next_person: green.GreenAgent = next_green_graph.nodes[person]['data']
interaction_result = next_person.attempt_switch(obj_connection.polarity)
if not interaction_result == Opinion.UNDEFINED:
next_person.uncertainty -= obj_connection.uncertainty.certainty() * green.OPINION_INFLUENCE
# Update totals
# if interaction_result == Opinion.RED:
# self.red_agent.red_count += 1
# self.blue_agent.blue_count -= 1
# elif interaction_result == Opinion.BLUE:
# self.red_agent.red_count -= 1
# self.blue_agent.blue_count += 1
self.green_agents = next_green_graph
self.red_agent.red_count, self.blue_agent.blue_count = self.count_majority()
#
# Visualization
#
def draw_green_network(self) -> None:
if not self.graphics:
return
labels = {}
colors = []
for node in self.green_agents.nodes():
agent = self.get_green_agent(node)
label = "F" if agent.following_red else ""
labels[node] = label
if agent.polarity == Opinion.RED:
colors.append((agent.uncertainty.certainty(), 0, 0))
else:
colors.append((0, 0, agent.uncertainty.certainty()))
nx.draw(self.green_agents, node_color=colors, labels=labels, pos=self.viz['pos'], node_size=70)
plt.savefig("green_graph.png", dpi=600)
plt.close()
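# Only green agents that are certain enough (uncertainty below UNCERTAINTY_THRESHOLD) are counted as committed red or blue voters.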
def count_majority(self) -> Tuple[int, int]:
red_count: int = 0
blue_count: int = 0
for _, person in self.green_agents.nodes(data="data"):
person: green.GreenAgent
if person.uncertainty < Uncertainty.UNCERTAINTY_THRESHOLD:
if person.polarity == Opinion.BLUE:
blue_count += 1
elif person.polarity == Opinion.RED:
red_count += 1
return red_count, blue_count
def determine_majority(self) -> "Winner":
if self.red_agent.red_count > self.n_green_agents * 0.9:
return Winner.RED_WIN
elif self.blue_agent.blue_count > self.n_green_agents * 0.9:
return Winner.BLUE_WIN
return Winner.NO_WINNER
def update_winner(self) -> None:
# > In order for the Red agent to win (i.e., a higher number of green agents with
# > opinion “not vote”, and an uncertainty less than 0 (which means they are
# > pretty certain about their choice))
#
# > In order for the Blue agent to win (i.e., a higher number of green agents with
# > opinion “vote”, and an uncertainty less than 0 (which means they are pretty
# > certain about their choice))
#
# Higher than what? Assuming it means 50% of the population, otherwise one team can
# win from the start of the game just by comparing to the same metric but
# with the other team.
majority = self.determine_majority()
if not majority == Winner.NO_WINNER:
self.winner = majority
return
# Blue agent loss:
# > If they expend all their energy, the game will end
# Source: Help3001
# > - blue agent dead
if self.blue_agent.blue_energy <= 0:
self.winner = Winner.BLUE_NO_ENERGY
return
# Red agent loss:
# > - red agent lost all followers
if self.red_agent.red_followers <= 0:
self.winner = Winner.RED_NO_FOLLOWERS
return
self.winner = Winner.NO_WINNER
return
def __str__(self) -> str:
return f"""
{self.iteration=}, {self.winner=}
{self.red_agent.red_followers=}, [{self.blue_agent.blue_energy=}, gray_mean={self.blue_agent.gray_mean()} {self.blue_agent.gray_mem=}]
{self.red_agent.red_count=}, {self.blue_agent.blue_count=}
{str(self.red_agent.last_message)} || {str(self.blue_agent.last_message)}
"""
def print_gamestate_pretty(self) -> str:
return f"""
--- ROUND {self.iteration} ---
Population statistics
- People voting (Blue) {self.blue_agent.blue_count}
- People not voting (Red) {self.red_agent.red_count}
Agent state
- Blue energy remaining: {self.blue_agent.blue_energy}
- Red subscribers: {self.red_agent.red_followers}
Last move
- Red: {self.red_agent.last_message.message}
- Blue: {self.blue_agent.last_message.message}
- Last gray outcome: {"UNDEFINED" if len(self.blue_agent.gray_mem) == 2 else self.blue_agent.gray_mem[-1]}
"""
class Winner(Enum):
NO_WINNER = 0
BLUE_WIN = 1
RED_WIN = 2
BLUE_NO_ENERGY = 3
RED_NO_FOLLOWERS = 4
def red_won(winner: "Winner"):
return winner in [Winner.BLUE_NO_ENERGY, Winner.RED_WIN]
def blue_won(winner: "Winner"):
return winner in [Winner.RED_NO_FOLLOWERS, Winner.BLUE_WIN]
def winner_opinion(winner: "Winner"):
if Winner.red_won(winner):
return Opinion.RED
elif Winner.blue_won(winner):
return Opinion.BLUE
return Opinion.UNDEFINED

92
etc/messages.py Normal file
@@ -0,0 +1,92 @@
from enum import Enum
from math import nan
from typing import Union, overload
from etc.custom_float import Uncertainty
from play_config import INITIAL_UNCERTAINTY_RANGE
class Opinion(Enum):
RED = 0
BLUE = 1
UNDEFINED = 2
def opposite(opinion: "Opinion"):
return Opinion.RED if opinion == Opinion.BLUE else Opinion.BLUE
def __str__(self) -> str:
return {
Opinion.RED: "RED",
Opinion.BLUE: "BLUE",
Opinion.UNDEFINED: "?",
}[self]
# > Sometimes people say that we would like to just model
# > it by 01.
# > Okay, They're very uncertain.
# > Would be zero and very certain would be won.
# > Okay, you can do that.
# > Okay.
# > So it depends upon your understanding.
# > All right?
# > Yes.
# Source: CITS3001 - 13 Sep 2022, 11:00 - Lecture
INFLUENCE_FACTOR = 1.0
class Message():
id: int
potency: Uncertainty
cost: float
message: str
def __init__(self, id: int, message: str, potency: float, cost: float) -> None:
self.id = id
self.cost = cost
self.potency = Uncertainty(potency)
self.message = message
def __str__(self) -> str:
return f"{self.potency=}, {self.cost=}, {self.message}"
MESSAGE_UNDEFINED = Message(0, "UNDEFINED", 0.0, 0.0)
# > So why did I mention that you need to have
# > five levels or 10 levels?
# > It was for simplicity, because this is how we normally
# > start the project.
# > If you just want five or 10 levels, that's fine.
# > I'm not going to detect points for that.
# Source: CITS3001 - 13 Sep 2022, 11:00 - Lecture
#
# > And then what else you need to have is for
# > every uncertainty value, either you need to have this in
# > a table, or you just need to define it by
# > an equation.
# Source: CITS3001 - 13 Sep 2022, 11:00 - Lecture
# Using a lookup-table
mul1 = 1
potencymul1 = 0.05
RED_MESSAGES = [
Message(0, "Red message (low)", potencymul1 * 0.1, 0.1 / mul1),
Message(1, "Red message (medlow)", potencymul1 * 0.15, 0.15 / mul1),
Message(2, "Red message (med)", potencymul1 * 0.2, 0.2 / mul1),
Message(3, "Red message (highmed)", potencymul1 * 0.25, 0.25 / mul1),
Message(4, "Red message (high)", potencymul1 * 0.3, 0.3 / mul1),
]
MESSAGE_BLUE_SPY = Message(0, "Gray spy - chance of highest red OR blue message, at no cost", nan, 0.0)
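# Blue potencies carry an extra 0.5 factor relative to red: blue influences every green agent per turn (see agents/blue.py), so message potency is halved to compensate.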
mul2 = 1
potencymul2 = 0.05
BLUE_MESSAGES = [
Message(0, "Blue message (low)", potencymul2 * 0.5 * 0.1, 0.1 / mul2),
Message(1, "Blue message (medlow)", potencymul2 * 0.5 * 0.15, 0.15 / mul2),
Message(2, "Blue message (med)", potencymul2 * 0.5 * 0.2, 0.2 / mul2),
Message(3, "Blue message (highmed)", potencymul2 * 0.5 * 0.25, 0.25 / mul2),
Message(4, "Blue message (high)", potencymul2 * 0.5 * 0.3, 0.3 / mul2),
]

25
etc/util.py Normal file
@@ -0,0 +1,25 @@
from shelve import DbfilenameShelf
from play_config import ENABLE_DIAGNOSTIC_MESSAGES
def debug_print(*args):
if ENABLE_DIAGNOSTIC_MESSAGES:
print(*args)
class RecursiveDict(dict):
# defaultdict but smaller
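# e.g. d = RecursiveDict(); d[1][2][3] = 5 creates the intermediate levels on demand; this is how the nested state_action_lut is built.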
def __missing__(self, key):
value = self[key] = type(self)()
return value
def __deepcopy__(self, memo):
return self
class NoCopyShelf(DbfilenameShelf):
def __deepcopy__(self, memo):
return self
@staticmethod
def open(filename, flag='c', protocol=None, writeback=False) -> "NoCopyShelf":
return NoCopyShelf(filename, flag, protocol, writeback)

31
play_config.py Normal file
@@ -0,0 +1,31 @@
import sys
from typing import Tuple
from etc.custom_float import Uncertainty
# Enable graphing
ENABLE_GRAPHICS = False
# Debug messages
ENABLE_DIAGNOSTIC_MESSAGES = False
# Serialize each game and save to a file
ENABLE_SNAPSHOTS = False
GAMESTATE_PARAMETER_SNAPSHOT_OUTPUT_DIR = "./snapshots/"
# Check Uncertainty class
INITIAL_UNCERTAINTY_RANGE: Tuple[Uncertainty, Uncertainty] = (Uncertainty(0.0), Uncertainty(0.0))
INITIAL_BLUE_ENERGY: float = 300.0
DAYS_UNTIL_ELECTION = sys.maxsize
# Training data path
BLUE_AGENT_LUT_PATH = "blue_training.bin"
RED_AGENT_LUT_PATH = "red_training.bin"
N_GREEN_AGENT = 100
P_GREEN_AGENT_CONNECTION = 0.05
P_GREEN_AGENT_BLUE = 0.5
N_GRAY_AGENT = 10
P_GRAY_AGENT_FRIENDLY = 0.5

198
play_manual.py Normal file
@@ -0,0 +1,198 @@
from copy import deepcopy
from datetime import datetime
import math
import pathlib
import pickle
from typing import List
from etc.custom_float import Uncertainty
from etc.gamestate import GameState, Winner
from etc.messages import Message, Opinion
from agents.blue import BlueAgent
from agents.red import RedAgent
import play_config as cfg
def rand_rounds(gs: GameState) -> GameState:
blue_gamestate = deepcopy(gs)
gs.blue_agent.smart_influence(gs)
red_gamestate = deepcopy(gs)
gs.red_agent.smart_influence(gs)
# gs.draw_green_network()
# gs.green_round()
# gs.draw_green_network()
# spy = bool(gs.rand.getrandbits(1))
# spy = False
gs.green_round()
gs.draw_green_network()
gs.red_agent.update_short_term_mem(red_gamestate, gs)
gs.blue_agent.update_short_term_mem(blue_gamestate, gs)
gs.iteration += 1
return gs
def main():
sel_parameters = input_opts("""Would you like to select parameters?
[y] -> Yes
[n] -> No
> """, ['y', 'n'])
if sel_parameters == 'y':
select_parameters()
cfg.ENABLE_SNAPSHOTS = ('y' == input_opts("""Would you like to enable snapshots?
[y] -> Yes
[n] -> No
> """, ['y', 'n']))
if cfg.ENABLE_SNAPSHOTS:
snapshot_path = pathlib.Path(cfg.GAMESTATE_PARAMETER_SNAPSHOT_OUTPUT_DIR)
snapshot_path.mkdir(parents=True, exist_ok=True)
cfg.ENABLE_GRAPHICS = ('y' == input_opts("""Enable graphics?
Note that this option is currently exporting to an image file for compatibility on headless operating systems (WSL)
[y] -> Graphics
[n] -> No graphics
> """, ['y', 'n']))
while True:
print("Starting new game")
state_buffer: List[GameState] = []
# gs: GameState = GameState.short_init((100, 0.05, 0.5), (10, 0.5))
gs: GameState = GameState(
green_agents=(cfg.N_GREEN_AGENT, cfg.P_GREEN_AGENT_CONNECTION, cfg.P_GREEN_AGENT_BLUE),
gray_agents=(cfg.N_GRAY_AGENT, cfg.P_GRAY_AGENT_FRIENDLY),
uncertainty_interval=cfg.INITIAL_UNCERTAINTY_RANGE,
seed=None, graphics=cfg.ENABLE_GRAPHICS)
player = input_opts("""Choose a team
[r] -> Red
[b] -> Blue
[n] -> None
> """, ['r', 'b', 'n'])
player: Opinion = {
'r': Opinion.RED,
'b': Opinion.BLUE,
'n': Opinion.UNDEFINED
}[player]
state_buffer.append(deepcopy(gs))
while gs.winner == Winner.NO_WINNER:
print(gs.print_gamestate_pretty())
print("Blue turn")
blue_gamestate = deepcopy(gs)
if player == Opinion.BLUE:
option = select_potency(BlueAgent.choices())
gs.blue_agent.influence(gs, option)
else:
gs.blue_agent.dumb_influence(gs)
print("Red turn")
red_gamestate = deepcopy(gs)
if player == Opinion.RED:
option = select_potency(RedAgent.choices())
gs.red_agent.influence(gs, option)
else:
gs.red_agent.dumb_influence(gs)
print("Green turn")
gs.green_round()
gs.draw_green_network()
gs.red_agent.update_short_term_mem(red_gamestate, gs)
gs.blue_agent.update_short_term_mem(blue_gamestate, gs)
gs.iteration += 1
gs.update_winner()
state_buffer.append(deepcopy(gs))
gs.close()
print(f"""
Game over
Round {gs.iteration}, reason {gs.winner}
Winner {Winner.winner_opinion(gs.winner)}""")
if player != Opinion.UNDEFINED:
print("YOU WIN 🎉" if Winner.winner_opinion(gs.winner) == player else "YOU LOSE 💀")
print(gs.print_gamestate_pretty())
input("Press enter to continue...")
# Save snapshot of game
if cfg.ENABLE_SNAPSHOTS:
with snapshot_path.joinpath(f"game_snapshot_{datetime.now().isoformat().replace(':','_')}.bin").open("wb") as f:
pickle.dump(state_buffer, f, protocol=pickle.HIGHEST_PROTOCOL)
def select_potency(messages: List[Message]) -> Message:
prompt_str = "Choose a potency\n"
for i in range(0, len(messages)):
prompt_str += f"[{i}] -> {str(messages[i])}\n"
return messages[input_p_int(prompt_str, 0, len(messages))]
def select_parameters() -> None:
# Override config defaults
print("\nBlue agent\n---")
cfg.INITIAL_BLUE_ENERGY = input_float("Blue agent initial energy?\n> ")
print("\nBounds of uniform uncertainty distribution at start of game for green agents\n---")
cfg.INITIAL_UNCERTAINTY_RANGE = (input_uncertainty("Lower\n> "), input_uncertainty("Upper\n> "))
print("\nGreen agents\n---")
cfg.N_GREEN_AGENT = input_p_int("Number of green agents\n> ")
cfg.P_GREEN_AGENT_CONNECTION = input_probability("Probability of green agent connections [0,1]\n> ")
cfg.P_GREEN_AGENT_BLUE = input_probability("Probability of green agents initialized to blue opinion\n> ")
print("\nGray agents\n---")
cfg.N_GRAY_AGENT = input_p_int("Number of gray agents\n> ")
cfg.P_GRAY_AGENT_FRIENDLY = input_probability("Probability of gray agents being blue\n> ")
def input_probability(prompt: str) -> float:
while True:
value = input_T(prompt, float)
if 0.0 <= value <= 1.0:
return value
else:
print("Invalid input")
def input_p_int(prompt: str, l_=0, u_=math.inf) -> int:
while True:
value = input_T(prompt, int)
if value >= l_ and value < u_:
return value
else:
print("Invalid input")
def input_float(prompt: str) -> float:
return input_T(prompt, float)
def input_uncertainty(prompt: str) -> Uncertainty:
return input_T(prompt, Uncertainty)
def input_T(prompt: str, type):
num: type = None
while num is None:
try:
num = type(input(prompt))
except ValueError:
print("Invalid input")
return num
def input_opts(prompt: str, opts: List[str]) -> str:
option: str = ""
while option not in opts:
if len(option) > 0:
print("Invalid input")
option = input(prompt).strip().lower()
return option
if __name__ == "__main__":
main()

25
requirements.txt Normal file
@@ -0,0 +1,25 @@
autopep8==1.7.0
cycler==0.11.0
flake8==5.0.4
fonttools==4.37.1
joblib==1.2.0
kiwisolver==1.4.4
matplotlib==3.5.3
mccabe==0.7.0
networkx==2.8.6
numpy==1.23.2
packaging==21.3
Pillow==9.2.0
pyaml==21.10.1
pycodestyle==2.9.1
pydot==1.4.2
pyflakes==2.5.0
pyparsing==3.0.9
python-dateutil==2.8.2
PyYAML==6.0
scikit-learn==1.1.2
scikit-optimize==0.9.0
scipy==1.9.2
six==1.16.0
threadpoolctl==3.1.0
toml==0.10.2

125
train.py Normal file
@@ -0,0 +1,125 @@
import csv
from datetime import datetime
import pathlib
import pickle
from time import time
import numpy as np
from copy import deepcopy
from types import FunctionType
from typing import Callable, Dict, List
from etc.gamestate import GameState, Winner
from etc.util import debug_print
from play_config import ENABLE_SNAPSHOTS, GAMESTATE_PARAMETER_SNAPSHOT_OUTPUT_DIR, INITIAL_UNCERTAINTY_RANGE, N_GRAY_AGENT, N_GREEN_AGENT,\
P_GRAY_AGENT_FRIENDLY, P_GREEN_AGENT_BLUE, P_GREEN_AGENT_CONNECTION
def rand_rounds(gs: GameState) -> GameState:
blue_gamestate = deepcopy(gs)
gs.blue_agent.dumb_influence(gs)
red_gamestate = deepcopy(gs)
gs.red_agent.dumb_influence(gs)
# gs.draw_green_network()
# gs.green_round()
# gs.draw_green_network()
# spy = bool(gs.rand.getrandbits(1))
# spy = False
gs.green_round()
gs.red_agent.update_short_term_mem(red_gamestate, gs)
gs.blue_agent.update_short_term_mem(blue_gamestate, gs)
gs.draw_green_network()
gs.iteration += 1
return gs
def intelligent_rounds(gs: GameState) -> GameState:
blue_gamestate = deepcopy(gs)
gs.blue_agent.smart_influence(gs)
red_gamestate = deepcopy(gs)
gs.red_agent.smart_influence(gs)
gs.green_round()
gs.red_agent.update_short_term_mem(red_gamestate, gs)
gs.blue_agent.update_short_term_mem(blue_gamestate, gs)
gs.draw_green_network()
gs.iteration += 1
return gs
def round(round_func: FunctionType) -> List[GameState]:
state_buffer: List[GameState] = []
# gs: GameState = GameState.short_init((100, 0.05, 0.5), (10, 0.5))
gs: GameState = GameState(
green_agents=(N_GREEN_AGENT, P_GREEN_AGENT_CONNECTION, P_GREEN_AGENT_BLUE),
gray_agents=(N_GRAY_AGENT, P_GRAY_AGENT_FRIENDLY),
uncertainty_interval=INITIAL_UNCERTAINTY_RANGE,
seed=None, graphics=False)
state_buffer.append(deepcopy(gs))
debug_print("INITIAL CONDITIONS.")
debug_print(gs)
debug_print("STARTING GAME.")
# gs.draw_green_network()
while gs.winner == Winner.NO_WINNER:
gs = round_func(gs)
# debug_print(gs.red_agent.red_followers, gs.blue_agent.blue_energy, len(list(gs.green_agents)))
debug_print(gs)
gs.update_winner()
gs.draw_green_network()
state_buffer.append(deepcopy(gs))
# print(gs)
gs.close()
# state_buffer.append(deepcopy(gs))
print(f"{gs.iteration} WINNER {gs.winner}")
return state_buffer
# Calibrator
def training_rounds(win_file_blue, win_file_red, round_func: Callable, training_iterations: int) -> None:
# state_buffer: List[GameState] = round(rand_rounds)
# exit()
ending_states: Dict[Winner, int] = {
Winner.BLUE_NO_ENERGY: 0,
Winner.BLUE_WIN: 0,
Winner.RED_NO_FOLLOWERS: 0,
Winner.RED_WIN: 0,
}
blue_win_round_len = []
red_win_round_len = []
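# Per-game round counts: stored as a positive iteration count for the side that won and a negative count for the side that lost.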
for x in range(0, training_iterations):
t = time()
print(f"Game {x}")
state_buffer: List[GameState] = round(round_func)
ending_state: GameState = state_buffer[-1]
ending_states[ending_state.winner] += 1
if ending_state.winner == Winner.BLUE_NO_ENERGY or ending_state.winner == Winner.RED_WIN:
red_win_round_len.append(ending_state.iteration)
blue_win_round_len.append(-ending_state.iteration)
elif ending_state.winner == Winner.RED_NO_FOLLOWERS or ending_state.winner == Winner.BLUE_WIN:
red_win_round_len.append(-ending_state.iteration)
blue_win_round_len.append(ending_state.iteration)
print(f"dt={time() - t} s")
print(ending_states)
print(blue_win_round_len)
print(red_win_round_len)
with open(win_file_blue, "w") as f:
writer = csv.writer(f)
writer.writerow(blue_win_round_len)
with open(win_file_red, "w") as f:
writer = csv.writer(f)
writer.writerow(red_win_round_len)
if __name__ == "__main__":
N_RANDOM_ROUNDS = 120
N_INTELLIGENT_ROUNDS = 300
training_rounds("rand_blue_win.csv", "rand_red_win.csv", rand_rounds, N_RANDOM_ROUNDS)
training_rounds("intel_blue_win.csv", "intel_red_win.csv", intelligent_rounds, N_INTELLIGENT_ROUNDS)

21
view_snapshot.py Normal file
@@ -0,0 +1,21 @@
import argparse
import pickle
from etc.gamestate import GameState
from typing import List
parser = argparse.ArgumentParser(description='View game snapshots.')
parser.add_argument('snapshot_path', metavar='PATH', type=str, nargs=1,
help='path to snapshot file (.bin)')
def main():
args = parser.parse_args()
with open(args.snapshot_path[0], "rb") as f:
gamestates: List[GameState] = pickle.load(f)
for gamestate in gamestates:
print(gamestate)
if __name__ == "__main__":
main()

34
view_training_data.py Normal file
@@ -0,0 +1,34 @@
import json
import pickle
from etc.util import NoCopyShelf, RecursiveDict
from etc.messages import BLUE_MESSAGES
def main() -> None:
data = NoCopyShelf.open(
"blue_training.bin",
protocol=pickle.HIGHEST_PROTOCOL,
writeback=True # Yes this makes it less performant, but it will be more readable.
)['data']
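# Walk the four nested LUT levels: red-opinion bin (x), blue-opinion bin (y), red-follower bin (z) and blue-energy bin (w).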
for x in data:
for y in data[x]:
for z in data[x][y]:
for w in data[x][y][z]:
d: RecursiveDict = data[x][y][z][w]
d_ = []
samples = []
for k in range(0, len(BLUE_MESSAGES)):
if k in d.keys():
samples.append(str(d[k][0]).ljust(4))
d_.append("{:.2f}".format(d[k][1] / d[k][0]).ljust(7))
else:
samples.append(str(0).ljust(4))
d_.append("_NaN_".ljust(7))
print(f"(red_op={x},blue_op={y},red_f={z},blue_e={w}) | {d_} | {samples}")
# print(json.dumps(data, sort_keys=True, indent=4))
if __name__ == "__main__":
main()