Support for building, running and deploying the server in docker containers.

See README.txt for more information.
This commit is contained in:
Scott Ludwig 2016-01-07 22:10:13 -08:00
parent 88711f1071
commit 087a9eae7d
21 changed files with 1116 additions and 203 deletions

8
server/.gitignore vendored
View File

@ -2,5 +2,13 @@ debug/*
!debug/.keep
release/*
!release/.keep
docker/debug/*
!docker/debug/.keep
docker/release/*
!docker/release/.keep
docker/wicontent/*
!docker/wicontent/.keep
log/
log.txt
*.pyc
.*.swp

View File

@ -1,50 +1,37 @@
# Compiler command line
# Add warning check for implicit data type resolution loss, otherwise
# gcc is silent.
BINARY=wis
ifeq ($(REL),1)
CPP_FLAGS=-O2 -DRELEASE_LOGGING -Wno-write-strings -I.. -fsigned-char
OUTDIR=release
REL=1
else
CPP_FLAGS=-g -O0 -DDEBUG_LOGGING -DDEBUG -DDEV_BUILD -Wno-write-strings -I.. -fsigned-char
OUTDIR=debug
REL=0
endif
UNAME := $(shell uname)
ifeq ($(UNAME), Darwin)
PRE_CC=g++
CC=$(PRE_CC) -DOSX
SOCKETSERVER=selectserver
CPP_FLAGS:=$(CPP_FLAGS) -DOSX
CPUS=1
endif
ifeq ($(UNAME), Linux)
PRE_CC=g++
CC=$(PRE_CC) -DLINUX
endif
LD=$(CC)
CPP_INCS=-I..
OUTDIR=
BINARY=hts
LDFLAGS=-lpthread
# Choose socketserver type
ifeq ($(shell uname -s),Darwin)
SOCKETSERVER=selectserver
endif
ifeq ($(shell uname -s),Linux)
SOCKETSERVER=epollserver
CPP_FLAGS:=$(CPP_FLAGS) -DLINUX
CPUS=$(shell grep processor /proc/cpuinfo | wc -l)
endif
# Debug is default
ifdef REL
CPP_FLAGS=-DRELEASE_LOGGING -g -Wno-write-strings
OUTDIR=release
OPTFLAGS=-O2
ifneq ($(DOCKER),0)
MAKE_ALL = $(shell docker/config AUTH_PREFIX) docker -- run -t --rm -v "$(shell dirname "$(shell pwd)")":/wi -w /wi -u wi $(shell docker/config REGISTRY_PREFIX)/wi_build:$(shell docker/image_version wi_build) /bin/bash -c "cd server && make REL=$(REL) DOCKER=1 -j $(CPUS) d_all"
INSIDE_CHECK := /bin/bash -c "if [ ! -f /.dockerinit ]; then echo 'ERROR: Outside container.' && exit 1; fi"
OUTSIDE_CHECK := /bin/bash -c "if [ -f /.dockerinit ]; then echo 'ERROR: Inside container.' && exit 1; fi"
else
CPP_FLAGS=-DDEBUG_LOGGING -DDEBUG -DDEV_BUILD -g -Wno-write-strings
OUTDIR=debug
OPTFLAGS=-O0
MAKE_ALL = make REL=$(REL) DOCKER=0 -j $(CPUS) d_all
INSIDE_CHECK :=
OUTSIDE_CHECK :=
endif
ifdef DEV_BUILD
CPP_FLAGS=-DDEV_BUILD $(CPP_FLAGS)
endif
CPPFLAGS=$(OPTFLAGS) $(CPP_FLAGS) $(CPP_INCS) -fsigned-char
MAINFILES=main server endpoint room lobby game levelinfo levelinfocache \
ncpackfile ncpdbreader playermgr player tokenauth statsposter httppost \
serverinfoupdater chatlimiter filewatcher tracker badwords tokenbucket \
@ -60,29 +47,60 @@ INCFILES=rip
ALLFILES=$(INCFILES) $(MAINFILES) $(BASEFILES) $(MPSHAREDFILES) $(YAJLFILESC) \
$(YAJLFILESCPP)
all: $(OUTDIR)/$(BINARY)
all: outside
$(MAKE_ALL)
clean:
rm -f $(OUTDIR)/*
$(OUTDIR)/$(BINARY): $(ALLFILES:%=$(OUTDIR)/%.o)
$(LD) -o $@ $^ $(LDFLAGS)
d_all: inside $(OUTDIR)/$(BINARY)
cp $(OUTDIR)/$(BINARY) docker/$(OUTDIR)
$(OUTDIR)/$(BINARY): $(ALLFILES:%=$(OUTDIR)/%.o)
g++ -o $@ $^ -lpthread
rm -f docker/$(OUTDIR)/$(BINARY)
$(MAINFILES:%=$(OUTDIR)/%.o): $(MAINFILES:%=%.cpp)
$(CC) $(CPPFLAGS) -c $(<D)/$(*F).cpp -o $@
g++ $(CPP_FLAGS) -c $(<D)/$(*F).cpp -o $@
$(BASEFILES:%=$(OUTDIR)/%.o): $(BASEFILES:%=../base/%.cpp)
$(CC) $(CPPFLAGS) -c $(<D)/$(*F).cpp -o $@
g++ $(CPP_FLAGS) -c $(<D)/$(*F).cpp -o $@
$(MPSHAREDFILES:%=$(OUTDIR)/%.o): $(MPSHAREDFILES:%=../mpshared/%.cpp)
$(CC) $(CPPFLAGS) -c $(<D)/$(*F).cpp -o $@
g++ $(CPP_FLAGS) -c $(<D)/$(*F).cpp -o $@
$(INCFILES:%=$(OUTDIR)/%.o): $(INCFILES:%=../inc/%.cpp)
$(CC) $(CPPFLAGS) -c $(<D)/$(*F).cpp -o $@
g++ $(CPP_FLAGS) -c $(<D)/$(*F).cpp -o $@
$(YAJLFILESCPP:%=$(OUTDIR)/%.o): $(YAJLFILESCPP:%=../yajl/wrapper/%.cpp)
$(CC) $(CPPFLAGS) -c $(<D)/$(*F).cpp -o $@
g++ $(CPP_FLAGS) -c $(<D)/$(*F).cpp -o $@
$(YAJLFILESC:%=$(OUTDIR)/%.o): $(YAJLFILESC:%=../yajl/src/%.c)
$(CC) $(CPPFLAGS) -x c -c $(<D)/$(*F).c -o $@
g++ $(CPP_FLAGS) -x c -c $(<D)/$(*F).c -o $@
new_base_build_images: outside
docker/build_image wi_base
docker/build_image wi_build
push_base_build_images: outside
docker/push_image wi_base
docker/push_image wi_build
new_server_image: outside
$(MAKE) REL=1 clean all
docker/build_image wi_server
push_server_image: outside
docker/push_image wi_server
create_gcloud_server: outside
docker/create_gcloud_server
drain_delete_gcloud_server: outside
docker/drain_delete_gcloud_server
outside:
@$(OUTSIDE_CHECK)
inside:
@$(INSIDE_CHECK)

View File

@ -14,177 +14,270 @@ on Google App Engine. This server is called the "leaderboard server", or just
This document will refer to the game running on a mobile device as the "client".
Quick start: how to run locally
-------------------------------
Running the leaderboard
-----------------------
The game server builds / runs on OSX or Linux. The game server does not
build or run on Windows at this time.
Multiplayer server relies on a Google AppEngine application, located in the
stats/ directory, called the leaderboard. This server is used for
authentication, profiles, and game stats. Before running the leaderboard,
choose a Google App Engine app name (the name is never visible):
These are simple, short instructions
for how to run the game server and leaderboard server locally, for testing
purposes:
https://console.developers.google.com
0. If you're compiling from Ubuntu 14.04 64 bit with gcc 4.8, you'll need
to install a few 32 bit libraries. This list might not be inclusive:
Change the application: line in stats/app.yaml with your app name:
sudo apt-get install libc6-dev:i386
sudo apt-get install libstdc++-4.8-dev:i386
sudo apt-get install gcc-multilib g++-multilib
application: <your Google App Engine app name>
1. From the server directory:
Next edit stats/config.py, and search for REPLACEME_ and change these
'secrets' to unique complex strings. Edit server/secrets.cpp and use the
same secret strings as used in stats/config.py. Make sure these secrets
are never checked into a public repository.
# For release
mkdir release
make REL=1 clean all
# For debug
mkdir debug
make clean all
Now run the leaderboard. If you plan to run locally only, run the app in
stats/, and use 8080 for the port. If you plan to run the leaderboard
remotely, push the app to Google app engine.
2. From the server directory, tar xvf testdata.tar.gz
This is test data for the server. For the full mission pack list, see below
in this document.
Preparing the game client
-------------------------
3. Run the App Engine app in the stats directory locally using the GAE
launcher. Show the GAE log console for debug output
1. Modify game/serviceurls.cpp with the appropriate leaderboard urls
from above that you are using.
4. Modify start.sh with the appropriate port you're using for the locally
running instance of the stats GAE app
5. To run the server, use start.sh locally for a sample set of command line
flags: . start.sh
6. Build the game under Debug so the localhost service urls are used
(see game/serviceurls.cpp).
7. Run the game in the simulator. Press Play -> Multiplayer.
Please continue reading for how to deploy on a public server.
How to run in production
------------------------
1. Create a Google App Engine application name here:
https://appengine.google.com/
It doesn't matter what the name is, the user will never see it.
2. Change the application: line in ../stats/app.yaml with your app name:
application: <your Google App Engine app name>
3. Deploy this application to Google App Engine using the
GoogleAppEngineLauncher app that comes with the SDK.
4. Modify ../game/serviceurls.cpp, and replace where you see <GAE APPNAME>
with your GAE app name. These service urls are now permanent.
5. Rebuild the server on a Linux system
$ cd server
$ mkdir release
$ make REL=1 clean all
6. Copy start.sh, release/hts, and htdata832.pdb to a publicly reachable
Linux system
$ scp start.sh you@your_server.com:
$ scp release/hts you@your_server.com:
$ scp ../game/htdata832.pdb you@your_server.com:
7. Download a copy of the 5000+ single and multiplayer maps, and copy this
to your server as well
$ wget http://www.warfareincorporated.com/~wicontent/wicontent.tar.gz
$ scp wicontent.tar.gz you@your_server.com:
8. ssh to you@your_server.com and perform the following:
$ <install apache>
$ cd <to the apache document root>
$ tar xvf wicontent.tar.gz (probably need to be root to do this)
(you'll now have a wi subdirectory in your apache document root)
9. Back on your local system, modify ../game/serviceurls.cpp, kszIndexUrl,
kszPackInfoUrl, and kszPackUrl as follows:
2. Modify game/serviceurls.cpp to point to an http server that serves
mission packs (see testdata.tar.gz for layout):
const char *kszIndexUrl = "http://<your_server.com>/wi/index";
const char *kszPackInfoUrl = "http://<your_server.com>/wi/info";
const char *kszPackUrl = "http://<your_server.com>/wi/pack";
This tells the client how to download mission packs. It will also be needed
by the server.
Recompile the client.
10. Rebuild your client with the changes that were made to serviceurls.cpp.
Customizing server/docker/config.json
-------------------------------------
11. Back on your_server.com machine, cd into the directory containing
start.sh, hts, and htdata832.pdb
If you intend to create public servers and manage them, you'll want
to set up a proper config.json. This is a json formatted dictionary
of the below keys. If you just want to run locally, this isn't required.
Note you can set this up later and then rebuild everything, but it is
easier to do up front. The config variables are:
12. Edit start.sh:
AUTH_PREFIX: some services (like Google Compute Engine), use 'gcloud'
to set up docker registry authentication. If you're using
GCE, set this to gcloud. It is ok to leave this empty if you're
authenticating with your registry in a different way.
a. Change --listen_address to be the ip of your server and the port you
wish to use, in ip:port format.
b. Change --missionpack_dir to the directory that holds the
wicontent.tar.gz content that was untarred into the apache document
root. For example: --missionpack_dir /var/www/wi
c. Change --htdata to ./htdata832.pdb, since it is in the current directory.
d. Change --stats_address to <GAE NAME>.appspot.com:80, using the Google
App Engine name you chose previously.
e. Change --server_name to the server name you wish to show up in-game.
f. Change --server_location to the server location you want to show up
in-game.
g. Change --server_type to production.
h. --server_info_extra is for adding additional json to that shows up
in the /api/serverinfo response from the leaderboard. It is optional but
can be useful for deployment information such as instance ids, zones,
versions, etc. Note the client looks for sort_key to indicate how this
server will be sorted related to others in the server list.
PROJECT_NAME: this is the project name that will be used as part of
the docker image name:
<registry_url>/<project_name>/<image_name>:<image_tag>
If this variable isn't set, the default value is 'server'.
13. Go ahead and run start.sh to start the server. Ultimately this can be
done from a cronjob at system startup using the @reboot directive (just
run start.sh from the @reboot crontab directive).
REGISTRY_URL: this is the url that refers to the registry this
image will reside on. For example for Google Compute Engine,
this might be 'us.gcr.io'. It's ok to keep this empty if
you have no intent of making public servers.
14. Wait 30 seconds for the game server to announce itself to the leaderboard.
Now start the game client (with the serviceurls.cpp changes), and select
Multiplayer. You will get an error if your client can't see the server.
WICONTENT_URL: The server must have access to the same mission packs
that users have. This is the url of the mission pack tarball that
the server will poll for changes every 5 minutes. The polling
is done by using HTTP ETags for efficiency, which means a download
only takes place if there is a change.
15. You can start as many servers as you want. The client will present a list
of all game servers to the user, who will then choose which server to
connect to.
LEADERBOARD_ADDRESS_AND_PORT: This is the domain name and port of the
leaderboard server, in the format <domain_name>:<port>, typically
<GAE APPNAME>.appspot.com:80. If running locally, that would be
127.0.0.1:<port>, with port typically 8080.
SENDCOMMAND_SECRET: this is the secret key used to sign requests to
the leaderboard's /api/sendcommand request handler. This must match
SENDCOMMAND_SECRET found in ../stats/config.py.
16. If this is all working, distribute your game client.
Finally config.json shouldn't be checked into the project, so that it
doesn't accidentally find its way onto github.com.
If you don't provide a config.json, you can still run locally. Your
project_name will default to 'server', your leaderboard will be assumed
to be running at 127.0.0.1:8080, and you won't be able to push images
or create/delete servers.
How to deploy a new game server
-------------------------------
Quick game server overview
--------------------------
Occasionally you'll want to deploy a new game server (with changes perhaps),
yet you want to minimally disrupt game play of players on the existing server.
Here are steps to do that:
The server supports two modes of building and executing. By default the server
is built and run inside a docker container. You can also build and run without
containers if you wish (read more below). Containers are the default because
that is the format the WI team uses for public servers.
1. Build the new server. Deploy it with a lower sort_key so it is first in
the client's server list.
Docker containers can be run on most public hosting services including Amazon,
Google, Azure, and other providers. The WI team uses Google Compute Engine,
and provides utilities for creating and deleting public servers on Google
Compute Engine.
2. Go to the admin page of the leaderboard with your browser:
Building and Running Quick Overview
-----------------------------------
https://<GAE NAME>.appspot.com/private/admin
By default, 'make' will build from a container, and './run' will run
from a container. Read the sections below about using containers and
registry authentication. Native building and running is also supported by
specifying DOCKER=0.
3. Select Drain / Undrain. Select the checkbox next to the old server you will
be stopping, select Drain. In approximately a minute, this will put the
server into a mode where it will stop accepting users.
1. Build the server
4. From the admin page, select Send Chat to Server. Select the checkbox next
to the old server, and type a message that the server will be shutting down
in 15 minutes, and all users should join the new server. Press submit.
Within 30 seconds, all users on that server will see this message. Send the
message every 2 minutes or so to remind users.
[REL=1] [DOCKER=0] make [clean] [all]
5. After 15 minutes or so, stop the old server.
DOCKER=0 means build without using docker containers (default is build
with docker containers).
REL=1 means build release (default is build debug)
clean removes all build products.
all builds the server. This is the default target if no targets are
specified.
This is the way to do it manually. There is a script that performs these steps
automatically; however, if the player count is less than 10, just stop the
old server without going through these steps.
2. Run the server
[DEBUGGER=<gdb|cgdb>] [REL=1] [DOCKER=0] ./run
If no mission pack data is installed an attempt will be made to
install it from either testdata.tar.gz or from the WICONTENT_URL specified
in docker/config.json.
Building and Running Using Containers
-------------------------------------
Install the gcloud tool from here:
https://cloud.google.com/sdk/
If you've installed some time ago, make sure it is up to date:
$ gcloud components update
Login:
$ gcloud auth login
Select the Google Compute Engine project you'll be using for this server:
$ gcloud config set project <GCE project name>
The latest image tags are stored in docker/image_versions.json and are updated
every time an image is built. When images are used for building or running,
the latest image tag is looked up in this file and used. You either have this
image locally already, or it is stored on the remote registry. Importantly,
on a multi-person team, this means if you check in image_versions.json, be
sure to first push the images with those versions to the registry (more on
this below).
Note using distinct image versions rather than 'latest' ensures that docker
doesn't use the wrong image for building or running.
1. Install docker:
https://docs.docker.com/engine/installation/
If on OSX or Windows, once installation is complete, run the
"Docker Quickstart Terminal". This initializes the VirtualBox VM with
docker daemon running, and sets up necessary environment variables.
Once this is complete, in any other terminal instance you can type
"eval $(docker env default)" to set environment variables and communicate
with the docker daemon. Note none of this is necessary on Linux since
docker daemon is running natively.
2. Create wi_base and wi_build images if necessary
There are 3 images used:
wi_base: basic image that the other two images are based on
wi_build: wi_base + build tools for building the server + debuggers.
Also used for running locally.
wi_server: used for running public servers.
If you haven't built wi_base or wi_build yet, you need to do that
first, because wi_build is used to build the server. If you will be
using wi_build image stored on your image registry to build the server,
you can skip this step.
$ make new_base_build_images
If you're running a remote registry, be sure to push these new images
to the registry.
$ make push_base_build_images
4. Build the server
If you will be using wi_build image that you built locally, you don't need
to be authenticated to continue. If you are using a wi_build image stored
on your image registry to build the server, first authenticate with your
registry service. For Google Compute, if you already followed the
authentication steps mentioned earlier, you're ready to go.
Refer to build instructions detailed above.
5. Run the server locally
Similar to the above, if you're using a local image you don't need
authentication. If you're using a remote image, be sure to authenticate
with your registry first.
When running locally in a container, the entire source tree is mapped into
the container. This makes it possible for source code debugging to work
properly. Also, docker/entrypoint is mapped into the container. This way
testing changes to entrypoint doesn't require building a new image.
Refer to the run instructions detailed above.
6. Build and push a server image
When you're ready to build a server image, first make sure you are
authenticated as mentioned previously. To build the server image:
$ make new_server_image
This will make REL=1 clean all, then build a wi_server image. No source
code is included in this image, only the bare essentials needed to run
the server.
Next, you need to push this image to the remote registry, so that your
public server can access it.
$ make push_server_image
7. Create a public server
This command assumes that Google Compute Engine is being used. Make sure
you are authenticated as mentioned previously. To create a public server
on Google Compute Engine:
$ make create_gcloud_server
You'll get asked 3 questions:
1) The first question is about a server id. This should be a number between
1-99, and be unique from other servers. If you have more than one server
deployed, the servers will appear in game in a list sorted by this id.
2) Server Name. This is for appearance only and appears in-game.
3) Server Location. This is also for appearance only and appears in-game.
8. Drain and delete a public server
This command assumes that Google Compute Engine is being used. Make sure
you are authenticated as mentioned previously. To drain and delete a public
server on Google Compute Engine:
$ make drain_delete_gcloud_server
This will lead you through a series of steps to drain and delete a server.
The script will first wait for the user count to get to a requested
level, then put the server into drain mode. In drain mode, new users can't
join the server. Then every minute a message is broadcasted to the
remaining users that the server will be shut down. This is continued until
the user count drops to zero, or 5 minutes, whichever comes first, then
the server is deleted.
9. Rebuild your client with the changes that were made to serviceurls.cpp.
10. If you can connect to your public server and play, distribute your game client.
Operational overview
--------------------
@ -192,7 +285,7 @@ Operational overview
When a user presses the multiplayer button, the client queries the leaderboard
for a list of current game servers (game servers publish information about
themselves to the leaderboard on a regular interval). This is an example of
what the leaderboard returns to the client for Warfare Incorporated:
what the leaderboard returns to the client for Hostile Takeover:
{
"expires_utc": 1402207827,
@ -239,11 +332,10 @@ $ curl http://<GAE APP NAME>.appspot.com/api/serverinfo
Mission Packs and other content
-------------------------------
The server expects to find mission pack content (and other content) at the
location pointed to by --missionpack_dir. This is a directory tree with
a required layout:
The server expects to find mission pack content (and other content) in this
directory layout:
<missionpack_dir>
wi/
info directory containing mission pack descriptions
pack directory containing mission packs themselves
index an index of the mission packs available for
@ -257,20 +349,13 @@ a required layout:
Mission Packs
-------------
It's important that the client and the server see the same mission packs.
Warfare Incorporated supports user submitted missions packs submitted to
It's necessary that the client and the server see the same mission packs.
Hostile Takeover supports user submitted missions packs submitted to
the forums. A cronjob pulls the mission packs out of the forums, checks them
for validity, and builds the new missionpack_dir content, and pushes these
changes out to all game servers. This happens every 15 minutes or so. Game
servers watch when the index file changes and reload.
for validity, and builds a tarball with the above layout. The game servers
will poll for changes to this tarball in an efficient way.
Game servers could use the content from warfareincorporated.com directly by
updating from this url:
http://www.warfareincorporated.com/~wicontent/wicontent.tar.gz
NOTE: This file is currently a static snapshot. It can be made to
dynamically update in the future. Contact scottlu.
Please contact scottlu for a copy of the latest tarball.
Modlist

106
server/docker/build_image Executable file
View File

@ -0,0 +1,106 @@
#!/usr/bin/python2.7
"""build_image <image name>
This tool wraps docker build to handle image versions appropriately
"""
import re
import sys
import os
import json
import time
import datetime
import tempfile
import subprocess
import shlex
import shutil
import config
# Image name prefix ("<registry_url>/<project_name>", or just the project
# name when no registry is configured) read from docker/config.json.
registry = config.get('REGISTRY_PREFIX')
def load_versions(filename):
    """Load the image-name -> version-tag mapping from a JSON file.

    Exits the process with status 1 when the file is missing, since
    builds must never proceed without pinned image versions.
    Keys and values are encoded to byte strings (a Python 2
    unicode -> str conversion) for safe use in shell commands.
    """
    if not os.path.exists(filename):
        print('Can\'t find image_versions.json')
        sys.exit(1)
    # open() replaces the deprecated file() builtin; with ensures close.
    with open(filename) as f:
        t = json.loads(f.read())
    versions = {}
    for key in t:
        versions[key.encode('utf-8')] = t[key].encode('utf-8')
    return versions
def save_versions(filename, versions):
    """Atomically persist the image version mapping as JSON.

    Writes to a sibling '<filename>_tmp' file first, then renames over
    the target so a reader never observes a partially written file.
    """
    filename_tmp = '%s_tmp' % filename
    # open() replaces the deprecated file() builtin; with guarantees the
    # handle is closed (and flushed) before the rename below.
    with open(filename_tmp, 'w') as f:
        json.dump(versions, f, ensure_ascii=True)
    shutil.move(filename_tmp, filename)
def create_dockerfile(image_name, dockerfile_template, versions):
    """Render a temp Dockerfile from a template with pinned image tags.

    Every "<image>:latest" reference in the template is rewritten to
    "<registry>/<image>:<version>" using the versions mapping, so builds
    never depend on a floating 'latest' tag.

    Returns the path of the generated temp file; the caller is
    responsible for removing it after the docker build.
    """
    # Temp file is created in the cwd so docker build can reference it.
    fd, path = tempfile.mkstemp(suffix='.bi', prefix='__%s_' % image_name, dir='.', text=True)
    # open() replaces the deprecated file() builtin.
    with open(dockerfile_template) as f:
        template_lines = [line.strip() for line in f.readlines()]
    for line in template_lines:
        for key in versions:
            pat = '%s:latest' % key
            repl = '%s/%s:%s' % (registry, key, versions[key])
            line = re.sub(pat, repl, line)
        os.write(fd, '%s\n' % line)
    os.close(fd)
    return path
def create_version_str():
    """Build a unique, sortable image tag, e.g. '2015-12-21-1440438708'.

    Combines the current date with the epoch timestamp so tags sort
    chronologically and do not collide across builds.
    """
    now = int(time.time())
    day = datetime.date.fromtimestamp(now)
    return '{0}-{1}'.format(day, now)
def main():
    """Build a docker image from its template and bump its pinned version.

    Usage: build_image <image name>

    Steps: resolve the image's Dockerfile template, render it with the
    currently pinned image versions, run 'docker build' tagged with a
    fresh timestamp version, and on success record the new tag in
    image_versions.json. Exits non-zero on any failure.
    """
    if len(sys.argv) == 1:
        print(__doc__)
        sys.exit(1)
    # Repository root: this script lives in <root>/server/docker.
    basedir = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), '../..'))
    image_name = sys.argv[1]
    dockerfile_paths = {
        'wi_base': os.path.join(basedir, 'server/docker/wi_base.df'),
        'wi_build': os.path.join(basedir, 'server/docker/wi_build.df'),
        'wi_server': os.path.join(basedir, 'server/docker/wi_server.df')
    }
    # One of the dockerfiles we recognize? ('in' replaces the deprecated
    # dict.has_key.)
    if image_name not in dockerfile_paths:
        print('Image %s doesn\'t exist.' % image_name)
        sys.exit(1)
    # Make sure this template exists
    dockerfile_template = dockerfile_paths[image_name]
    if not os.path.exists(dockerfile_template):
        print('Dockerfile template %s doesn\'t exist.' % dockerfile_template)
        sys.exit(1)
    # Insert the pinned versions into this dockerfile before using it.
    versions = load_versions(os.path.join(basedir, 'server/docker/image_versions.json'))
    dockerfile_tempfile = create_dockerfile(image_name, dockerfile_template, versions)
    # Create a new version number for this image
    version_str = create_version_str()
    # Build the image
    print('Building %s/%s:%s' % (registry, image_name, version_str))
    s = 'docker build --rm=true -f %s -t %s/%s:%s %s'
    args = shlex.split(s % (dockerfile_tempfile, registry, image_name, version_str, basedir))
    return_code = subprocess.call(args)
    os.remove(dockerfile_tempfile)
    if return_code != 0:
        print('docker build returned error %d' % return_code)
        sys.exit(1)
    # This image has a new version. Write the new version file.
    versions[image_name] = version_str
    save_versions(os.path.join(basedir, 'server/docker/image_versions.json'), versions)
    # Print the created image with its tag.
    print('Image tag %s/%s:%s' % (registry, image_name, version_str))


if __name__ == '__main__':
    main()

7
server/docker/config Executable file
View File

@ -0,0 +1,7 @@
#!/usr/bin/python2.7
# Shell-friendly wrapper around config.py: prints the value of a single
# configuration key from docker/config.json so shell scripts and make
# can consume it. Usage: config <KEY>
import sys
import config
# Silently print nothing when the argument count is wrong, so callers
# receive an empty string rather than an error.
if len(sys.argv) == 2:
    print config.get(sys.argv[1])

View File

@ -0,0 +1,8 @@
{
"AUTH_PREFIX": "",
"PROJECT_NAME": "server",
"REGISTRY_URL": "",
"WICONTENT_URL": "",
"LEADERBOARD_ADDRESS_AND_PORT": "127.0.0.1:8080",
"SENDCOMMAND_SECRET": "REPLACEME_SENDCOMMANDSECRET"
}

31
server/docker/config.py Normal file
View File

@ -0,0 +1,31 @@
import os, sys, json
def get_value(j, key):
    """Return j[key], or '' when the key is absent (missing-config default)."""
    # dict.get replaces the deprecated (and Python 3-removed) dict.has_key.
    return j.get(key, '')
def get(key):
    """Look up a configuration value for this deployment.

    Reads config.json from the directory containing the running script.
    A missing, unreadable, or malformed config.json degrades to an empty
    config so local runs work without one.

    Special keys:
      PROJECT_NAME    - defaults to 'server' when unset.
      REGISTRY_PREFIX - '<REGISTRY_URL>/<PROJECT_NAME>', or just the
                        project name when no registry is configured.
    Any other unset key returns ''.
    """
    try:
        filepath = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), 'config.json')
        # open() replaces the deprecated file() builtin. The except below
        # is narrowed from a bare except: EnvironmentError covers missing/
        # unreadable files, ValueError covers malformed JSON.
        with open(filepath) as f:
            j = json.loads(f.read())
    except (EnvironmentError, ValueError):
        j = {}
    s = j.get(key, '')
    if s:
        return s
    # PROJECT_NAME is a required value. Provide a default value if necessary.
    project_name = j.get('PROJECT_NAME', '') or 'server'
    if key == 'PROJECT_NAME':
        return project_name
    if key == 'REGISTRY_PREFIX':
        registry_url = j.get('REGISTRY_URL', '')
        if not registry_url:
            return project_name
        return '/'.join([registry_url, project_name])
    return ''

View File

@ -0,0 +1,64 @@
#!/bin/bash
#
# create_gcloud_server: interactively create a public game server on
# Google Compute Engine, running the pinned wi_server docker image
# inside a container-vm instance.

# Directory this script lives in, so sibling tools resolve regardless
# of the caller's cwd.
BINDIR="$(cd "${0%/*}" && echo $PWD)"

# Deployment settings come from docker/config.json (via ./config) and
# the currently pinned wi_server tag (via ./image_version).
# (A duplicate WICONTENT_URL assignment was removed.)
IMAGE_TAG=$(${BINDIR}/image_version wi_server)
PROJECT_NAME="$(${BINDIR}/config PROJECT_NAME)"
IMAGE="$(${BINDIR}/config REGISTRY_PREFIX)/wi_server"
WICONTENT_URL="$(${BINDIR}/config WICONTENT_URL)"
LEADERBOARD_ADDRESS_AND_PORT="$(${BINDIR}/config LEADERBOARD_ADDRESS_AND_PORT)"
# Instance name derives from the image tag, keeping it unique per build.
INSTANCE_NAME="i-${IMAGE_TAG}"
TEMP_FILENAME=$(mktemp)

echo "NOTE: This script assumes use of Google Compute Engine."
echo "Before continuing ensure these steps have been taken:"
echo "1. Install gcloud from here if not already installed:"
echo " https://cloud.google.com/sdk/"
echo "2. If already installed ensure it is up to date:"
echo " $ gcloud components update"
echo "3. Authenticate:"
echo " $ gcloud auth login"
echo "4. Select the proper project:"
echo " $ gcloud config set project ${PROJECT_NAME}"

echo -n "Unique numeric server id (used for sorting): "
read SERVER_ID
echo -n "Server name (appears in-game): "
read SERVER_NAME
echo -n "Server location (appears in-game): "
read SERVER_LOCATION
echo -n "Type yes to create new public server: "
read YES
if [ "$YES" != "yes" ]; then
    exit 1;
fi

# Generate the container-vm pod manifest consumed by the instance.
cat > ${TEMP_FILENAME} <<xyzzy
apiVersion: v1
kind: Pod
metadata:
  name: ${PROJECT_NAME}
spec:
  containers:
    - name: ${PROJECT_NAME}
      image: ${IMAGE}:${IMAGE_TAG}
      args: ['runwis', '--build_type', 'release', '--public_listen_port', '22221', '--image_tag', '${IMAGE_TAG}', '--gce_metadata', '--wicontent_url', '${WICONTENT_URL}', '--args', '--server_id', '${SERVER_ID}', '--server_name', '${SERVER_NAME}', '--server_location', '${SERVER_LOCATION}', '--server_type', 'production', '--listen_address', '0.0.0.0', '--listen_port', '22221', '--stats_address', '${LEADERBOARD_ADDRESS_AND_PORT}', '--checksync']
      imagePullPolicy: Always
      ports:
        - containerPort: 22221
          hostPort: 22221
xyzzy

echo Launching instance ${INSTANCE_NAME}...
gcloud compute instances create "${INSTANCE_NAME}" \
    --image container-vm \
    --metadata-from-file google-container-manifest=${TEMP_FILENAME} \
    --zone us-central1-a \
    --machine-type g1-small
# BUG FIX: capture gcloud's exit code immediately. Previously the else
# branch echoed $?, which by then held the [ ] test's own status
# (always 1 in that branch), not gcloud's failure code.
RESULT=$?
if [ ${RESULT} -eq 0 ]; then
    echo Success.
else
    echo Error code ${RESULT}
fi
rm -f ${TEMP_FILENAME}

View File

View File

@ -0,0 +1,219 @@
#!/usr/bin/python2.7
import md5
import os
import urllib2
import json
import sys
import time
import config
# Behavior toggles for this drain/delete run.
DRAIN = True
TERMINATE = True
# Minutes of shutdown warnings before the server is deleted.
SHUTDOWN_MINUTES = 5
# Secrets and addresses come from docker/config.json via config.py.
sendcommand_secret = config.get('SENDCOMMAND_SECRET')
leaderboard_address = config.get('LEADERBOARD_ADDRESS_AND_PORT')
sendcommand_url = 'http://%s/api/sendcommand' % leaderboard_address
# NOTE(review): this hits /api/leaderboard while the variable (and the
# commented-out local override below) says serverinfo -- confirm which
# endpoint is intended.
serverinfo_url = 'http://%s/api/leaderboard' % leaderboard_address
project_name = config.get('PROJECT_NAME')
# Local-testing overrides, kept for reference:
#SHUTDOWN_MINUTES = 5
#sendcommand_url = 'http://localhost:8080/api/sendcommand'
#serverinfo_url = 'http://localhost:8080/api/serverinfo'
def get_serverinfo():
    """GET the leaderboard's current server list.

    Returns the parsed JSON response (a dict with an 'infos' list), or
    None on HTTP error (the error is printed).
    """
    opener = urllib2.build_opener(urllib2.HTTPHandler)
    request = urllib2.Request(serverinfo_url)
    # Force GET explicitly (the default, but documents intent).
    request.get_method = lambda: 'GET'
    try:
        response = opener.open(request).read()
    except urllib2.HTTPError, e:
        print 'Error %d, %s' % (e.code, e.read())
        return None
    return json.loads(response)
def get_updated_info(info):
    """Re-fetch the server list and return the fresh record matching
    info (same name and start time), or None if it is gone."""
    latest = get_serverinfo()
    for candidate in latest['infos']:
        same_name = candidate['name'] == info['name']
        same_start = candidate['start_utc'] == info['start_utc']
        if same_name and same_start:
            return candidate
    return None
def which_servers(serverinfo):
    """Interactively choose which server to drain/delete and which
    server (if any) remaining players should be told to join.

    Prints an indexed listing of every server in serverinfo['infos'],
    then prompts (blocking on stdin) for two selections. Returns a
    (info, join_info) tuple; join_info is None when the operator enters
    -1. Returns None when there are no servers at all.
    """
    if len(serverinfo['infos']) == 0:
        return None
    # List each server with an index plus all of its published fields.
    index = 0
    for info in serverinfo['infos']:
        print 'Server #%d. Name: %s' % (index, info['name'])
        for key in sorted(info.keys()):
            if key == 'name':
                continue
            print ' %s: %s' % (key, info[key])
        index = index + 1
    # Which server gets drained and deleted?
    info = None
    while True:
        try:
            ans = raw_input('Which server number to drain and delete? ')
            index = int(ans)
            if index >= 0 and index < len(serverinfo['infos']):
                info = serverinfo['infos'][index]
                break
            print 'Incorrect server number. Try again.'
        except ValueError:
            print 'Not a number. Try again.'
    # Which server should remaining players be pointed at (-1 = none)?
    join_info = None
    while True:
        try:
            ans = raw_input('Which server number should players join (-1 for none)? ')
            index = int(ans)
            if index >= 0 and index < len(serverinfo['infos']):
                join_info = serverinfo['infos'][index]
                break
            if index == -1:
                join_info = None
                break
            print 'Incorrect server number. Try again.'
        except ValueError:
            print 'Not a number. Try again.'
    return info, join_info
def ask_continue(server_name, join_server, shutdown_count, player_count):
print 'PLEASE VERIFY:'
print '1. You wish to drain and delete server %s.' % server_name
print '2. You wish to start this process once the player count reaches <= %d.' % player_count
if join_server:
print '3. Once reached, remaining players will receive a message to join server %s.' % join_server
else:
print '3. Once reached, remaining players will receive a warning message every minute.'
print '4. The server will be deleted when the user count goes to zero or after %d minutes.' % shutdown_count
while True:
yesno = raw_input('Would you like to continue? (y/n) ')
if len(yesno) < 1:
continue
if yesno[0] == 'n' or yesno[0] == 'N':
return False
if yesno[0] == 'y' or yesno[0] == 'Y':
return True
def ask_player_count(server_name, info):
print '%s currently has a player count of %d.' % (server_name, info['player_count'])
while True:
s = raw_input('What player count should trigger the drain-delete process? ')
try:
return int(s)
except ValueError:
print '%s is not a number. Try again.' % s
def wait_player_count(server_name, info, player_count):
while True:
print '%s current player count: %d. Trigger: %d' % \
(server_name, info['player_count'], player_count)
if info['player_count'] <= player_count:
return
time.sleep(60)
info = get_updated_info(info)
def send_command(o):
j = json.dumps(o)
m = md5.new(j + sendcommand_secret)
body = m.hexdigest() + j
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request(sendcommand_url, data=body)
request.add_header('Content-Type', 'binary/octet-stream')
request.add_header('Content-Length', len(body))
request.get_method = lambda: 'POST'
try:
response = opener.open(request).read()
return True
except urllib2.HTTPError, e:
print 'error %d, %s' % (e.code, e.read())
return False
def drain_wait(info):
o = dict(info=dict(name=info['name'], start_utc=info['start_utc']),
command=dict(command='drain'))
print 'Sending drain command.'
if not send_command(o):
return False
while True:
print 'Waiting for drain confirmation.'
new_info = get_updated_info(info)
if not new_info:
return False
if new_info['status'] == 'drain':
print 'Drain confirmed.'
return True
time.sleep(30)
def send_shutdown_message(info, join_server, shutdown_count):
if join_server:
message = 'Shutdown in %d minutes. Join server %s now!' % \
(shutdown_count, join_server)
else:
message = 'Shutdown in %d minutes.' % shutdown_count
o = dict(info=dict(name=info['name'], start_utc=info['start_utc']),
command=dict(command='chat', name='Admin', message=message))
print 'Sending: %s: %s' % (o['command']['name'],
o['command']['message'])
return send_command(o)
def main():
print "NOTE: Before continuing ensure these steps have been taken:"
print "1. Install gcloud from here if not already installed:"
print " https://cloud.google.com/sdk/"
print "2. If already installed ensure it is up to date:"
print " $ gcloud components update"
print "3. Authenicate:"
print " $ gcloud auth login"
print "4. Select the proper project:"
print " $ gcloud config set project %s" % project_name
serverinfo = get_serverinfo()
if not serverinfo:
print 'could not get serverinfo.'
sys.exit(1)
info, join_info = which_servers(serverinfo)
if not info:
print 'no server selected.'
sys.exit(1)
server_name = '%s (start_utc: %s)' % (info['name'], info['start_utc'])
join_server = join_info['name'] if join_info else None
player_count = ask_player_count(server_name, info)
shutdown_count = SHUTDOWN_MINUTES
if not ask_continue(server_name, join_server, shutdown_count, player_count):
print 'no action taken.'
sys.exit(1)
wait_player_count(server_name, info, player_count)
if DRAIN:
print 'Setting %s to drain mode. Waiting for confirmation.' % server_name
if not drain_wait(info):
print 'could not set %s into drain mode. no action taken.' % server_name
sys.exit(1)
while shutdown_count != -1:
new_info = get_updated_info(info)
print 'players: %d' % new_info['player_count']
if new_info['player_count'] == 0:
break
send_shutdown_message(info, join_server, shutdown_count)
shutdown_count = shutdown_count - 1
time.sleep(60)
print 'deleting: %s instance name: %s' % (server_name, info['instance_name'])
if TERMINATE:
sys.exit(os.system('gcloud compute instances delete "%s" --quiet --zone us-central1-a' % (info['instance_name'])))
sys.exit(0)
if __name__ == '__main__':
main()

153
server/docker/entrypoint Executable file
View File

@ -0,0 +1,153 @@
#!/bin/bash
set -e
# Print command-line help. Keep this list in sync with the option parser
# below.
usage() {
    echo Usage: `basename "$0"` "runwis [args] --args <wis args>"
    echo "--args: All parameters after this point get passed to wis"
    echo "--debugger: Run wis under the given debugger (e.g. gdb or cgdb)."
    echo "--public_ip: The public ip address that users will connect to."
    echo "--public_listen_port: The public port that users will connect to."
    echo "--instance_name: Name of the VM instance this server is running on."
    echo "--image_tag: The tag of the docker image being used, if known."
    echo "--build_type: release or debug."
    echo "--wicontent_url: Url of the content tarball to download before launch."
    echo "--gce_metadata: Query Google GCE metadata for public ip and instance name."
    echo "--help: Displays this usage information."
}
# Download and unpack the wicontent tarball when the copy on the server
# has changed since the last run.
#
#   $1 - url of wicontent.tar.gz (may be empty, in which case this is a
#        no-op).
#
# Change detection uses HTTP ETags: the ETag of the last successful
# download is cached next to the tarball, sent back via If-None-Match,
# and the tarball is only unpacked when the server returns a different
# ETag.
function download_content() {
    local WICONTENT_URL="${1}"
    local WICONTENT_DIR=/wi/server/docker/wicontent
    local ETAG_FILEPATH="${WICONTENT_DIR}/wicontent.tar.gz-etag"
    local TARBALL_FILEPATH="${WICONTENT_DIR}/wicontent.tar.gz"
    # "force" never matches a real ETag, so a first run always downloads.
    local LAST_ETAG="force"
    # No url? Don't attempt download.
    if [ -z "$WICONTENT_URL" ]; then
        return
    fi
    # Grab the last ETag if it exists
    if [ -f "${ETAG_FILEPATH}" ]; then
        LAST_ETAG=$(cat "${ETAG_FILEPATH}")
    fi
    # Request the file with If-None-Match.
    # NOTE(review): assumes the server spells the response header exactly
    # "ETag" (grep is case-sensitive) and quotes the value -- confirm
    # against the actual content server.
    local ETAG=$(curl -s -D - "${WICONTENT_URL}" -H 'If-None-Match: "'${LAST_ETAG}'"' -o /tmp/out.bin | grep ETag | cut -d: -f2 | cut -d\" -f2)
    # If we have an ETag there was a response. If it doesn't match last one
    # then there is a file change
    if [[ ! -z "${ETAG}" ]] && [[ "${ETAG}" != "${LAST_ETAG}" ]]; then
        mv /tmp/out.bin "${TARBALL_FILEPATH}"
        tar -xf "${TARBALL_FILEPATH}" -C "${WICONTENT_DIR}"
        # Remember the new ETag for the next poll.
        echo -n "${ETAG}" > "${ETAG_FILEPATH}"
    fi
}
# Run normally if not runwis: any other command is executed verbatim
# (standard docker entrypoint convention).
if [ "$1" != "runwis" ]; then
    exec "$@"
fi
# Need to be user wi: if still root, re-exec this same script through
# gosu so the server never runs with root privileges.
if [ "$(whoami)" != "wi" ]; then
    exec gosu wi "$0" "$@"
fi
# Drop the "runwis" token; the remaining args are ours.
shift
# Poll for content? This mode is launched by this script itself (see
# below) as a background child: re-check for new content every 5 minutes,
# forever. This branch never falls through to the option parser.
if [ "${1}" == "--poll-content" ]; then
    while true; do
        sleep 300
        download_content "${2}"
    done
fi
# Parse parameters. Each flag (except --args, --gce_metadata and --help)
# consumes the following word as its value.
until [ -z "$1" ]; do
    case "$1" in
    --args)
        # Everything after --args is passed straight through to wis.
        shift
        break
        ;;
    --debugger)
        shift
        # The appended "--args" here is the debugger's own flag (gdb/cgdb)
        # separating debugger options from the program command line.
        DEBUGGER="$1 --args"
        ;;
    --public_ip)
        shift
        PUBLIC_IP="$1"
        ;;
    --public_listen_port)
        shift
        PUBLIC_LISTEN_PORT="$1"
        ;;
    --instance_name)
        shift
        INSTANCE_NAME="$1"
        ;;
    --image_tag)
        shift
        IMAGE_TAG="$1"
        ;;
    --build_type)
        shift
        BUILD_TYPE="$1"
        ;;
    --gce_metadata)
        # Boolean flag: fetch ip/instance name from GCE metadata below.
        QUERY_GCE_METADATA="1"
        ;;
    --wicontent_url)
        shift
        WICONTENT_URL="$1"
        ;;
    --help)
        usage
        exit 0
        ;;
    *)
        echo Unknown argument \'$1\'
        usage
        exit 1
        ;;
    esac
    shift
done
# On GCE, the instance name and public ip can be fetched from the local
# metadata service instead of being passed on the command line.
# Fix: quote the variable so the test is well-formed when it is unset.
if [ ! -z "$QUERY_GCE_METADATA" ]; then
    INSTANCE_NAME=$(curl -s "http://metadata.google.internal/computeMetadata/v1/instance/hostname" -H "Metadata-Flavor: Google" | cut -d. -f1)
    PUBLIC_IP=$(curl -s "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip" -H "Metadata-Flavor: Google")
fi
# This is the full public address
PUBLIC_ADDRESS=$PUBLIC_IP:$PUBLIC_LISTEN_PORT
# Tie together instance name and image tag for convenient querying
EXTRA_INFO='{"instance_name":"'${INSTANCE_NAME}'","image_tag":"'${IMAGE_TAG}'"}'
# Download content before running server
download_content "${WICONTENT_URL}"
# Poll for new content in the background
"$0" runwis --poll-content "${WICONTENT_URL}" &
# Run the server and restart if it crashes.
# Fix: "|| true" keeps a non-zero exit (i.e. a crash) from terminating
# this script via the top-level "set -e", which would otherwise defeat
# the restart loop.
while true; do
    ${DEBUGGER} /wi/server/docker/${BUILD_TYPE}/wis --missionpack_dir /wi/server/docker/wicontent/wi --htdata /wi/game/htdata832.pdb --stats_path /api/addgamestats --server_info_path /api/serverinfo --server_info_expires 60 --public_address "$PUBLIC_ADDRESS" --server_info_extra "$EXTRA_INFO" "$@" || true
    # Under a debugger, exit when the session ends instead of relaunching.
    if [ ! -z "${DEBUGGER}" ]; then
        exit 0
    fi
    sleep 1
done

46
server/docker/image_version Executable file
View File

@ -0,0 +1,46 @@
#!/usr/bin/python2.7
"""image_version <image name>
This tool returns the latest version tag for an image
"""
import re
import sys
import os
import json
import time
import datetime
import tempfile
import subprocess
import shlex
import shutil
def load_versions(filename):
    """Load the {image name: version tag} map from image_versions.json.

    Keys and values are returned utf-8 encoded. Prints an error and exits
    the process if the file does not exist.
    """
    if not os.path.exists(filename):
        print('Can\'t find image_versions.json')
        sys.exit(1)
    # Fix: use a context manager instead of the deprecated file() builtin,
    # which also leaked the open handle.
    with open(filename) as f:
        t = json.load(f)
    versions = {}
    for key in t:
        versions[key.encode('utf-8')] = t[key].encode('utf-8')
    return versions
def main():
    """Print the version tag recorded for the image named in argv[1]."""
    if len(sys.argv) == 1:
        print(__doc__)
        sys.exit(1)
    image_name = sys.argv[1]
    # image_versions.json lives next to this script.
    script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
    versions = load_versions(os.path.join(script_dir, 'image_versions.json'))
    # Does version exist for this image?
    if image_name not in versions:
        print('Version not available for %s.' % image_name)
        sys.exit(1)
    print(versions[image_name])
if __name__ == '__main__':
    main()

View File

@ -0,0 +1 @@
{"wi_server": "", "wi_base": "", "wi_build": ""}

54
server/docker/push_image Executable file
View File

@ -0,0 +1,54 @@
#!/usr/bin/python2.7
"""push_image <image name>
This tool wraps docker push to handle image versions appropriately
"""
import sys
import os
import json
import subprocess
import shlex
import config
# Deployment settings resolved once at import time via docker/config:
# the docker registry prefix images are pushed to, and the command prefix
# (if any) used to authenticate docker invocations.
registry = config.get('REGISTRY_PREFIX')
auth = config.get('AUTH_PREFIX')
def load_versions(filename):
    """Load the {image name: version tag} map from image_versions.json.

    Keys and values are returned utf-8 encoded. Prints an error and exits
    the process if the file does not exist.
    """
    if not os.path.exists(filename):
        print('Can\'t find image_versions.json')
        sys.exit(1)
    # Fix: use a context manager instead of the deprecated file() builtin,
    # which also leaked the open handle.
    with open(filename) as f:
        t = json.load(f)
    versions = {}
    for key in t:
        versions[key.encode('utf-8')] = t[key].encode('utf-8')
    return versions
def main():
    """Push the docker image named in argv[1] at its recorded version tag."""
    if len(sys.argv) == 1:
        print(__doc__)
        sys.exit(1)
    image_name = sys.argv[1]
    tag_prefixes = sys.argv[2:]  # accepted but currently unused
    # image_versions.json lives next to this script.
    script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
    versions = load_versions(os.path.join(script_dir, 'image_versions.json'))
    # Does version exist for this image?
    if image_name not in versions:
        print('Version not available for %s.' % image_name)
        sys.exit(1)
    # Push the image
    image = '%s/%s:%s' % (registry, image_name, versions[image_name])
    print('Pushing image %s' % image)
    s = '%s docker -- push %s' % (auth, image)
    return_code = subprocess.call(shlex.split(s))
    if return_code != 0:
        print(s)
        print('returned error %d' % return_code)
        sys.exit(return_code)
if __name__ == '__main__':
    main()

View File

14
server/docker/wi_base.df Normal file
View File

@ -0,0 +1,14 @@
# Base image shared by the wi containers: debian wheezy plus curl, gosu
# (used by the entrypoint to drop root privileges) and an unprivileged
# "wi" user.
FROM google/debian:wheezy
RUN apt-get update -y \
 && apt-get install --no-install-recommends -y -q curl ca-certificates \
 && rm -rf /var/lib/apt/lists/*
# Public key that signs gosu releases.
RUN gpg --keyserver pool.sks-keyservers.net --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4
# Install gosu 1.2 for the running architecture and verify its signature
# before marking it executable.
RUN curl -o /usr/local/bin/gosu -SL "https://github.com/tianon/gosu/releases/download/1.2/gosu-$(dpkg --print-architecture)" \
 && curl -o /usr/local/bin/gosu.asc -SL "https://github.com/tianon/gosu/releases/download/1.2/gosu-$(dpkg --print-architecture).asc" \
 && gpg --verify /usr/local/bin/gosu.asc \
 && rm /usr/local/bin/gosu.asc \
 && chmod +x /usr/local/bin/gosu
# Unprivileged uid/gid 1000 "wi" user; /wi is where the tree lives.
RUN groupadd wi -g 1000 && useradd wi -g wi -u 1000 \
 && mkdir -p /home/wi && chown wi:wi /home/wi \
 && mkdir -p /wi && chown wi:wi /wi

View File

@ -0,0 +1,5 @@
# Build/debug image: wi_base plus the compiler toolchain and debuggers.
# The source tree itself is volume-mounted in at run time rather than
# copied into the image.
FROM wi_base:latest
RUN apt-get update -y \
 && apt-get install --no-install-recommends -y -q net-tools build-essential gdb cgdb \
 && rm -rf /var/lib/apt/lists/*

View File

@ -0,0 +1,7 @@
# Release server image: wi_base plus the game data, the release build of
# wis and the entrypoint. Mission content is fetched into
# /wi/server/docker/wicontent at run time by the entrypoint.
FROM wi_base:latest
RUN mkdir -p /wi/server/docker/wicontent && chown wi:wi /wi/server/docker/wicontent
COPY game/htdata832.pdb /wi/game/htdata832.pdb
COPY server/docker/release /wi/server/docker/release
COPY server/docker/entrypoint /entrypoint
ENTRYPOINT ["/entrypoint"]

View File

88
server/run Executable file
View File

@ -0,0 +1,88 @@
#!/bin/bash
# Absolute path of the directory containing this script.
SERVER_DIR="$(cd "${0%/*}" && echo $PWD)"
# Address:port of the stats/leaderboard service, from docker/config.
LEADERBOARD_ADDRESS_AND_PORT="$(${SERVER_DIR}/docker/config LEADERBOARD_ADDRESS_AND_PORT)"
# Usage:
#
# [DEBUGGER=<gdb|cgdb>] [DOCKER=0] [REL=1] ./run
#
# Set DEBUGGER to cgdb or gdb to debug
# Set DOCKER=0 to run without docker (runs with docker by default)
# Set REL=1 to run release (runs debug by default)
# Run the server inside the wi_build image. The whole source tree is
# mounted at /wi, so locally built binaries and test content are visible
# inside the container.
run_with_docker() {
    # Ensure control-c goes to docker, not this script
    set -m
    # Use wi_build because it has debuggers in it. wi_build contains app data and images
    # because the entire wi tree gets mapped in.
    TAG_VERSION="$(${SERVER_DIR}/docker/image_version wi_build)"
    IMAGE="$(${SERVER_DIR}/docker/config REGISTRY_PREFIX)/wi_build"
    AUTH_PREFIX="$(${SERVER_DIR}/docker/config AUTH_PREFIX)"
    # Debugger
    if [ ! -z "$DEBUGGER" ]; then
        DEBUGGER="--debugger ${DEBUGGER}"
    fi
    # Prepare test data once, for faster iteration
    if [ ! -f "${SERVER_DIR}/docker/wicontent/wi/index" ]; then
        # Expand test data if it exists, otherwise try to download it
        if [ -f "${SERVER_DIR}/testdata.tar.gz" ]; then
            tar -xf "${SERVER_DIR}/testdata.tar.gz" -C "${SERVER_DIR}/docker/wicontent"
            mv "${SERVER_DIR}/docker/wicontent/testdata" "${SERVER_DIR}/docker/wicontent/wi"
        else
            # No local test data: let the container's entrypoint download it.
            WICONTENT_URL="$(${SERVER_DIR}/docker/config WICONTENT_URL)"
        fi
    fi
    # Run
    echo Build: ${BUILD_TYPE}, image: ${IMAGE}:${TAG_VERSION}
    ${AUTH_PREFIX} docker -- run -it --rm -p 22221:22221 -v "${SERVER_DIR}/docker/entrypoint":/entrypoint -v "$(dirname "${SERVER_DIR}")":/wi ${IMAGE}:${TAG_VERSION} /entrypoint runwis ${DEBUGGER} --build_type ${BUILD_TYPE} --public_ip 127.0.0.1 --public_listen_port 22221 --instance_name "My Instance Name" --image_tag "My Image Tag" --wicontent_url "${WICONTENT_URL}" --args --server_id 50 --server_name "My Server" --server_location "My Office" --server_type "production" --listen_address 0.0.0.0 --listen_port 22221 --stats_address "${LEADERBOARD_ADDRESS_AND_PORT}" --checksync
    # Fix: removed stray 'rm -f "${TEMP_ENTRYPOINT}"' -- TEMP_ENTRYPOINT is
    # never set anywhere, so the line was dead leftover code.
}
# Run the locally built wis binary directly on the host (no docker).
# Mission content is expanded or downloaded into docker/wicontent first.
run_without_docker() {
    # Attempt putting test data in place
    if [ ! -f "${SERVER_DIR}/docker/wicontent/wi/index" ]; then
        # Expand test data if it exists
        if [ -f "${SERVER_DIR}/testdata.tar.gz" ]; then
            echo Untaring testdata.tar.gz...
            tar -xf "${SERVER_DIR}/testdata.tar.gz" -C "${SERVER_DIR}/docker/wicontent"
            mv "${SERVER_DIR}/docker/wicontent/testdata" "${SERVER_DIR}/docker/wicontent/wi"
        else
            # No local test data; fall back to downloading the content
            # tarball if a url is configured.
            WICONTENT_URL="$(${SERVER_DIR}/docker/config WICONTENT_URL)"
            if [ ! -z "${WICONTENT_URL}" ]; then
                echo Attempting download of wicontent.tar.gz...
                curl -s "${WICONTENT_URL}" -o "${SERVER_DIR}/docker/wicontent/wicontent.tar.gz"
                tar -xf "${SERVER_DIR}/docker/wicontent/wicontent.tar.gz" -C "${SERVER_DIR}/docker/wicontent"
            fi
        fi
    fi
    # The server cannot start without the mission pack index file.
    if [ ! -f "${SERVER_DIR}/docker/wicontent/wi/index" ]; then
        echo "Mission pack info missing ${SERVER_DIR}/docker/wicontent"
        exit 1
    fi
    # Debugger
    if [ ! -z "$DEBUGGER" ]; then
        # "--args" separates gdb/cgdb options from the program command line.
        DEBUGGER="${DEBUGGER} --args"
    fi
    ${DEBUGGER} ${BUILD_TYPE}/wis --listen_address 127.0.0.1:22221 --server_id 0 --missionpack_dir docker/wicontent/wi --htdata ../game/htdata832.pdb --stats_address "${LEADERBOARD_ADDRESS_AND_PORT}" --stats_path /api/addgamestats --server_info_path /api/serverinfo --server_name Icarus --server_location "Seattle WA" --server_type beta --server_info_extra "{\"foo\":\"bar\"}" --server_info_expires 60 --checksync --max_players 2000
}
# REL=1 selects the release build; anything else runs the debug build.
case "${REL}" in
    1) BUILD_TYPE=release ;;
    *) BUILD_TYPE=debug ;;
esac
# DOCKER=0 runs directly on the host; the default runs inside docker.
case "${DOCKER}" in
    0) run_without_docker ;;
    *) run_with_docker ;;
esac

View File

@ -1 +0,0 @@
debug/hts --listen_address 127.0.0.1:18516 --server_id 0 --missionpack_dir testdata --htdata ../game/htdata832.pdb --stats_address 127.0.0.1:8080 --stats_path /api/addgamestats --server_info_path /api/serverinfo --server_name Icarus --server_location "Seattle WA" --server_type beta --server_info_extra "{\"foo\":\"bar\"}" --server_info_expires 60 --checksync --max_players 2000 2>&1 | tee log.txt