boot.sh — 4.4 KB
#!/bin/bash
################################################################
## Finals Club instance set up notes by Joe Snow 11/19/2011
################################################################
## these are the steps I take to create a new fc instance
## log into aws, launch new AMI instance
## ssh into machine
## sudo to root
## download and run boot.sh (this file -- either from s3 or ssh); this can take 10 minutes
## NOTE: start.sh usually fails due to env vars, etc.
## as ec2-user, run start.sh
## add arbiter db config and mongo replicated set config; import mongo data using the mongorestore command
## I usually create and grab the latest mongodb backup from S3 (created from an existing live server)
## add dev public keys to /home/ec2-user/.ssh/authorized_keys
## update /home/ec2-user/.bashrc with AWS env vars
## add /home/ec2-user/fc/fcbackup/.fcbackup.env (populated with AWS env vars)
## restart fc app
## add crontab, using fc/util/crontab-example.txt as an example, as needed
## update cloudwatch monitors for cpu, disk, etc.
## as root, start the epl monitor script in /home/ec2-user/fc/util/start-fc-epl-monitor.sh
## check app health and switch DNS as desired
################################################################
  23. cd /root
  24. if test ! -e "reset.sh" ; then
  25. cat > "reset.sh" << FIN
  26. #!/bin/bash
  27. curl https://s3.amazonaws.com/finalsclub.org/boot.sh | sh
  28. FIN
  29. chmod 500 reset.sh
  30. fi
  31. echo "Booting" `date`
  32. yes | yum --nogpgcheck install gcc-c++
  33. yes | yum --nogpgcheck install openssl-devel
  34. yes | yum --nogpgcheck install make
  35. yes | yum --nogpgcheck install git
  36. yes | yum --nogpgcheck install sqlite-devel
  37. yes | yum --nogpgcheck install mysql-server
  38. # /etc/init.d/mysqld start
  39. # /usr/bin/mysqladmin -u root password 'foobarbazquX'
  40. # install mongodb
  41. mongover="1.8.2"
  42. if test ! -e mongodb.tgz ; then
  43. curl http://fastdl.mongodb.org/linux/mongodb-linux-i686-$mongover.tgz > mongodb-linux-i686-$mongover.tgz
  44. tar xzf mongodb-linux-i686-$mongover.tgz
  45. cd mongodb-linux-i686-$mongover/bin
  46. chmod a+rx *
  47. chmod uo-w *
  48. cp -f * /usr/local/bin
  49. mkdir -p /data/db
  50. /usr/local/bin/mongod -v --rest --replSet finalsclubset &> /var/log/mongod.log &
  51. ## optional arbiter start command
  52. ## mkdir -p /data/arbiterdb
  53. ## /usr/local/bin/mongod -v --dbpath /data/arbiterdb --port 27021 --rest --replSet finalsclubset &> /var/log/mongod-arbiter.log &
  54. #### NOTE: the replicated set name could change or increment, for example we might be using finalsclubset4 instead of finalsclubset
  55. ## example to set up new clean replicated set
  56. ## as ec2-user
  57. ## mongo ### start mongo cli util from bash prompt
  58. ## > rs.initiate() ## init the replicated set (assumes you are starting a new clean replicated set)
  59. ## > rs.addArb("ip-10-166-206-34:27021") ## assumes arbiter instance was started previously on specified port, IP for example only, use same machineID
  60. ## > rs.status() ## confirm both instances are in set
  61. fi
  62. # install node
  63. nodever="v0.4.10"
  64. if test ! -e node-$nodever ; then
  65. curl http://nodejs.org/dist/node-$nodever.tar.gz > node-$nodever.tar.gz
  66. tar xzvf node-$nodever.tar.gz
  67. cd node-$nodever
  68. ./configure
  69. make
  70. make install
  71. fi
  72. # install npm
  73. if test ! -e npm ; then
  74. git clone http://github.com/isaacs/npm.git
  75. cd npm
  76. sudo make install
  77. cd ..
  78. fi
  79. npm install nodemon -g
  80. npm install forever -g
  81. ## make it easier for root to run node
  82. cd /usr/bin
  83. ln -sf /usr/local/bin/node .
  84. ln -sf /usr/local/bin/forever .
  85. ## haproxy install (optional)
  86. # assumes this script is running as root
  87. mkdir /usr/local/haproxy
  88. cd /usr/local/haproxy
  89. wget http://haproxy.1wt.eu/download/1.4/bin/haproxy-1.4.17-pcre-40kses-linux-i586.notstripped.gz
  90. gunzip haproxy-1.4.17-pcre-40kses-linux-i586.notstripped.gz
  91. ln -sf haproxy-1.4.17-pcre-40kses-linux-i586.notstripped haproxy
  92. chmod 770 haproxy*
  93. wget https://s3.amazonaws.com/finalsclub.org/haproxy.cfg
  94. chmod 660 haproxy.cfg
  95. ## command to start haproxy (from /usr/local/haproxy dir)
  96. # sudo /usr/local/haproxy/haproxy -f /usr/local/haproxy/haproxy.cfg -p /var/run/haproxy.pid &
  97. ## init the reboot-restart.sh script, but don't run it.
  98. cd ~
  99. wget https://s3.amazonaws.com/finalsclub.org/reboot-restart.sh
  100. chmod 755 reboot-restart.sh
  101. echo "/root/reboot-restart.sh &> /var/log/fc-reboot-restart.log.txt &" >> /etc/rc.local
  102. ## NOTE: each time, I've had to run this step manually, as the ec2-user, after env vars have been set up
  103. cd /home/ec2-user
  104. curl https://s3.amazonaws.com/finalsclub.org/start.sh | sudo -u ec2-user sh