/var/cache/debconf/config.dat-new: Permission denied - clojure

I'm getting the following when I run this:
debconf: DbDriver "passwords" warning: could not open /var/cache/debconf/passwords.dat: Permission denied
debconf: DbDriver "config": could not write /var/cache/debconf/config.dat-new: Permission denied
#> [packages]: Packages : FAIL
Looks like it isn't sudoing for whatever reason.
(ns localhost.idk
  (:require (pallet [compute :as compute]
                    [api :as api :refer [lift]]
                    [actions :as actions])))

;; Running this on my local machine
(def my-data-center
  (compute/instantiate-provider
   "node-list"
   :node-list [["localhost" "ed" "127.0.0.1" :ubuntu
                :is-64bit nil]]))

(def user-deadghost
  (api/make-user "deadghost"
                 :password "my-pw"
                 :sudo-password nil)) ; pwless sudo set up

(defn install-ed []
  (pallet.api/lift
   (pallet.api/group-spec
    "ed"
    :phases {:configure (api/plan-fn
                         ;; This works:
                         ;; (pallet.actions/exec-script
                         ;;  ("sudo aptitude install ed"))
                         ;; This is trying to run without sudo:
                         (actions/packages :aptitude ["ed"]))})
   ;; log shows: p.script-builder prefix kw :no-sudo
   :compute my-data-center
   :user user-deadghost))
Script it's running:
#!/usr/bin/env bash
mkdir -p /home/deadghost || exit 1
cd /home/deadghost
set -h
echo '[packages]: Packages...';
{
{ debconf-set-selections <<EOF
debconf debconf/frontend select noninteractive
debconf debconf/frontend seen false
EOF
} && enableStart() {
rm /usr/sbin/policy-rc.d
} && apt-get -q -y install ed+ && dpkg --get-selections
} || { echo '#> [packages]: Packages : FAIL'; exit 1;} >&2
echo '#> [packages]: Packages : SUCCESS'
exit $?
The error I'm receiving is consistent with the debconf portion not having sudo.

Answer provided by hugod, author of pallet.
As of pallet 0.8.0-RC.11, when running on localhost the default script prefix is :no-sudo, whereas it would otherwise be :sudo. This is for historical reasons I don't know the details of.
To change the script prefix back to sudo, wrap your action in (pallet.action/with-action-options {:script-prefix :sudo} YOUR-ACTION-HERE). So in my case it looks like this:
(defn install-ed []
  (pallet.api/lift
   (pallet.api/group-spec
    "ed"
    :phases {:configure (api/plan-fn
                         (pallet.action/with-action-options
                           {:script-prefix :sudo}
                           (actions/packages :aptitude ["ed"])))})
   :compute my-data-center
   :user user-deadghost))
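To verify the change, re-running the phase from the REPL should now show the sudo prefix in the script-builder log (the :sudo line below is the expected outcome with this wrap, not output I captured):
;; re-run the configure phase after adding with-action-options
(install-ed)
;; previously the log showed: p.script-builder prefix kw :no-sudo
;; with :script-prefix :sudo it should show: p.script-builder prefix kw :sudo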

Related

GKE cluster using gitbash tool

I have Python 3.7 installed at the following path on my Windows machine: C:\ProgramData\Microsoft\Windows\Start Menu\Programs\Python 3.7
I am trying to connect to a GCP GKE cluster using Git Bash, and when I run the gcloud command below to connect to the GKE cluster I get a "python not found" error.
$ gcloud container clusters get-credentials appcluster --region us-east4 --project dev
/c/Users/surendar/AppData/Local/Google/Cloud SDK/google-cloud-sdk/bin/gcloud: line 181: exec: python: not found
Any suggestions to resolve the error?
Below is the Google/Cloud SDK/google-cloud-sdk/bin/gcloud file.
Line 181 points to the declaration below, which is the last line of the file:
exec "$CLOUDSDK_PYTHON" $CLOUDSDK_PYTHON_ARGS "${CLOUDSDK_ROOT_DIR}/lib/gcloud.py" "$@"
# Copyright 2013 Google Inc. All Rights Reserved.
#
# <cloud-sdk-sh-preamble>
#
# CLOUDSDK_ROOT_DIR (a) installation root dir
# CLOUDSDK_PYTHON (u) python interpreter path
# CLOUDSDK_GSUTIL_PYTHON (u) python interpreter path for gsutil
# CLOUDSDK_PYTHON_ARGS (u) python interpreter arguments
# CLOUDSDK_PYTHON_SITEPACKAGES (u) use python site packages
# CLOUDSDK_BQ_PYTHON (u) python interpreter for bq
# CLOUDSDK_ENCODING (u) python io encoding for gcloud
#
# (a) always defined by the preamble
# (u) user definition overrides preamble
# Wrapper around 'which' and 'command -v', tries which first, then falls back
# to command -v
_cloudsdk_which() {
which "$1" 2>/dev/null || command -v "$1" 2>/dev/null
}
# Check whether passed in python command reports major version 3.
_is_python3() {
echo "$("$1" -V 2>&1)" | grep -E "Python 3" > /dev/null
}
# For Python 3, gsutil requires Python 3.5+.
_py3_interpreter_compat_with_gsutil () {
# Some environments (e.g. macOS) don't support grep -P, so we use grep -E.
echo "$("$1" -V 2>&1)" | grep -E "Python 3[.]([5-9]|[1-9][0-9])" > /dev/null
}
order_python() {
selected_version=""
for python_version in "$@"
do
if [ -z "$selected_version" ]; then
if _cloudsdk_which $python_version > /dev/null && "$python_version" -c "import sys; print(sys.version)" > /dev/null; then
selected_version=$python_version
fi
fi
done
if [ -z "$selected_version" ]; then
selected_version=python
fi
echo $selected_version
}
# Determines the real cloud sdk root dir given the script path.
# Would be easier with a portable "readlink -f".
_cloudsdk_root_dir() {
case $1 in
/*) _cloudsdk_path=$1
;;
*/*) _cloudsdk_path=$PWD/$1
;;
*) _cloudsdk_path=$(_cloudsdk_which $1)
case $_cloudsdk_path in
/*) ;;
*) _cloudsdk_path=$PWD/$_cloudsdk_path ;;
esac
;;
esac
_cloudsdk_dir=0
while :
do
while _cloudsdk_link=$(readlink "$_cloudsdk_path")
do
case $_cloudsdk_link in
/*) _cloudsdk_path=$_cloudsdk_link ;;
*) _cloudsdk_path=$(dirname "$_cloudsdk_path")/$_cloudsdk_link ;;
esac
done
case $_cloudsdk_dir in
1) break ;;
esac
if [ -d "${_cloudsdk_path}" ]; then
break
fi
_cloudsdk_dir=1
_cloudsdk_path=$(dirname "$_cloudsdk_path")
done
while :
do case $_cloudsdk_path in
*/) _cloudsdk_path=$(dirname "$_cloudsdk_path/.")
;;
*/.) _cloudsdk_path=$(dirname "$_cloudsdk_path")
;;
*/bin) dirname "$_cloudsdk_path"
break
;;
*) echo "$_cloudsdk_path"
break
;;
esac
done
}
CLOUDSDK_ROOT_DIR=$(_cloudsdk_root_dir "$0")
setup_cloudsdk_python() {
# if $CLOUDSDK_PYTHON is not set, look for bundled python else
# prefer python3 over python2
if [ -z "$CLOUDSDK_PYTHON" ]; then
# Is bundled python present?
if [ -x "$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3" ];
then
CLOUDSDK_PYTHON="$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3"
CLOUDSDK_PYTHON_SITEPACKAGES=1
else
CLOUDSDK_PYTHON=$(order_python python3 python2 python2.7 python)
fi
fi
}
setup_cloudsdk_python
# $PYTHONHOME can interfere with gcloud. Users should use
# CLOUDSDK_PYTHON to configure which python gcloud uses.
unset PYTHONHOME
# if CLOUDSDK_PYTHON_SITEPACKAGES and VIRTUAL_ENV are empty
case :$CLOUDSDK_PYTHON_SITEPACKAGES:$VIRTUAL_ENV: in
:::) # add -S to CLOUDSDK_PYTHON_ARGS if not already there
case " $CLOUDSDK_PYTHON_ARGS " in
*" -S "*) ;;
" ") CLOUDSDK_PYTHON_ARGS="-S"
;;
*) CLOUDSDK_PYTHON_ARGS="$CLOUDSDK_PYTHON_ARGS -S"
;;
esac
unset CLOUDSDK_PYTHON_SITEPACKAGES
;;
*) # remove -S from CLOUDSDK_PYTHON_ARGS if already there
while :; do
case " $CLOUDSDK_PYTHON_ARGS " in
*" -S "*) CLOUDSDK_PYTHON_ARGS=${CLOUDSDK_PYTHON_ARGS%%-S*}' '${CLOUDSDK_PYTHON_ARGS#*-S} ;;
*) break ;;
esac
done
# if CLOUDSDK_PYTHON_SITEPACKAGES is empty
[ -z "$CLOUDSDK_PYTHON_SITEPACKAGES" ] &&
CLOUDSDK_PYTHON_SITEPACKAGES=1
export CLOUDSDK_PYTHON_SITEPACKAGES
;;
esac
# Allow users to set the Python interpreter used to launch gsutil, falling
# back to the CLOUDSDK_PYTHON interpreter otherwise.
if [ -z "$CLOUDSDK_GSUTIL_PYTHON" ]; then
CLOUDSDK_GSUTIL_PYTHON="$CLOUDSDK_PYTHON"
fi
if [ -z "$CLOUDSDK_BQ_PYTHON" ]; then
CLOUDSDK_BQ_PYTHON="$CLOUDSDK_PYTHON"
fi
if [ -z "$CLOUDSDK_ENCODING" ]; then
if [ -z "$PYTHONIOENCODING" ]; then
CLOUDSDK_ENCODING=UTF-8
else
CLOUDSDK_ENCODING="$PYTHONIOENCODING"
fi
fi
export CLOUDSDK_ROOT_DIR
export CLOUDSDK_PYTHON_ARGS
export CLOUDSDK_GSUTIL_PYTHON
export CLOUDSDK_BQ_PYTHON
export CLOUDSDK_ENCODING
export PYTHONIOENCODING="$CLOUDSDK_ENCODING"
case $HOSTNAME in
*.corp.google.com|*.c.googlers.com) export CLOUDSDK_GOOGLE_AUTH_IS_GOOGLE_DOMAIN=true;;
esac
# </cloud-sdk-sh-preamble>
exec "$CLOUDSDK_PYTHON" $CLOUDSDK_PYTHON_ARGS "${CLOUDSDK_ROOT_DIR}/lib/gcloud.py"** "$#"```
You will need to point the environment variable CLOUDSDK_PYTHON at your Python executable (e.g. python.exe). To find the Python executable, you should be able to right-click on "Python 3.7" in the start menu and look at "Target".
In my case, the Python executable is located at C:\Users\g_r_s\AppData\Local\Programs\Python\Python37\python.exe
Using Git Bash, you can export CLOUDSDK_PYTHON
$ export CLOUDSDK_PYTHON=/c/Users/g_r_s/AppData/Local/Programs/Python/Python37/python.exe
$ gcloud version
Google Cloud SDK 344.0.0
beta 2021.06.04
bq 2.0.69
core 2021.06.04
gsutil 4.62
NOTE: You can also try installing the bundled Python when you install the SDK on Windows.
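If you don't want to re-export this in every new Git Bash session, you can append the export to ~/.bashrc (sketch; the path below is the example path from above, so adjust it to your own python.exe location):
$ echo 'export CLOUDSDK_PYTHON=/c/Users/g_r_s/AppData/Local/Programs/Python/Python37/python.exe' >> ~/.bashrc
$ source ~/.bashrc
$ gcloud container clusters get-credentials appcluster --region us-east4 --project dev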

Running supervisord in AWS Environment

I'm working on adding Django Channels to my Elastic Beanstalk environment, but running into trouble configuring supervisord. Specifically, in /.ebextensions I have a file channels.config with this code:
container_commands:
  01_copy_supervisord_conf:
    command: "cp .ebextensions/supervisord/supervisord.conf /opt/python/etc/supervisord.conf"
  02_reload_supervisord:
    command: "supervisorctl -c /opt/python/etc/supervisord.conf reload"
This fails on the 2nd command with the following error message from the Elastic Beanstalk CLI:
Command failed on instance. Return code: 1 Output: error: <class
'FileNotFoundError'>, [Errno 2] No such file or directory:
file: /opt/python/run/venv/local/lib/python3.4/site-
packages/supervisor/xmlrpc.py line: 562.
container_command 02_reload_supervisord in
.ebextensions/channels.config failed.
My guess would be supervisor didn't install correctly, but because command 1 copies the files without an error, that leads me to think supervisor is indeed installed and I have an issue with the container command. Has anyone implemented supervisor in an AWS environment and can see where I'm going wrong?
You should be careful about Python versions and exact installation paths.
Here is how I did it; maybe it can help:
packages:
  yum:
    python27-setuptools: []
container_commands:
  01-supervise:
    command: ".ebextensions/supervise.sh"
Here is supervise.sh:
#!/bin/bash
if [ "${SUPERVISE}" == "enable" ]; then
export HOME="/root"
export PATH="/sbin:/bin:/usr/sbin:/usr/bin:/opt/aws/bin"
easy_install supervisor
cat <<'EOB' > /etc/init.d/supervisord
# Source function library
. /etc/rc.d/init.d/functions
# Source system settings
if [ -f /etc/sysconfig/supervisord ]; then
. /etc/sysconfig/supervisord
fi
# Path to the supervisorctl script, server binary,
# and short-form for messages.
supervisorctl=${SUPERVISORCTL-/usr/bin/supervisorctl}
supervisord=${SUPERVISORD-/usr/bin/supervisord}
prog=supervisord
pidfile=${PIDFILE-/var/run/supervisord.pid}
lockfile=${LOCKFILE-/var/lock/subsys/supervisord}
STOP_TIMEOUT=${STOP_TIMEOUT-60}
OPTIONS="${OPTIONS--c /etc/supervisord.conf}"
RETVAL=0
start() {
echo -n $"Starting $prog: "
daemon --pidfile=${pidfile} $supervisord $OPTIONS
RETVAL=$?
echo
if [ $RETVAL -eq 0 ]; then
touch ${lockfile}
$supervisorctl $OPTIONS status
fi
return $RETVAL
}
stop() {
echo -n $"Stopping $prog: "
killproc -p ${pidfile} -d ${STOP_TIMEOUT} $supervisord
RETVAL=$?
echo
[ $RETVAL -eq 0 ] && rm -rf ${lockfile} ${pidfile}
}
reload() {
echo -n $"Reloading $prog: "
LSB=1 killproc -p $pidfile $supervisord -HUP
RETVAL=$?
echo
if [ $RETVAL -eq 7 ]; then
failure $"$prog reload"
else
$supervisorctl $OPTIONS status
fi
}
restart() {
stop
start
}
case "$1" in
start)
start
;;
stop)
stop
;;
status)
status -p ${pidfile} $supervisord
RETVAL=$?
[ $RETVAL -eq 0 ] && $supervisorctl $OPTIONS status
;;
restart)
restart
;;
condrestart|try-restart)
if status -p ${pidfile} $supervisord >&/dev/null; then
stop
start
fi
;;
force-reload|reload)
reload
;;
*)
echo $"Usage: $prog {start|stop|restart|condrestart|try-restart|force-reload|reload}"
RETVAL=2
esac
exit $RETVAL
EOB
chmod +x /etc/init.d/supervisord
cat <<'EOB' > /etc/sysconfig/supervisord
# Configuration file for the supervisord service
#
# Author: Jason Koppe <jkoppe@indeed.com>
# original work
# Erwan Queffelec <erwan.queffelec@gmail.com>
# adjusted to new LSB-compliant init script
# make sure elasticbeanstalk PARAMS are being passed through to supervisord
. /opt/elasticbeanstalk/support/envvars
# WARNING: change these wisely! for instance, adding -d, --nodaemon
# here will lead to a very undesirable (blocking) behavior
#OPTIONS="-c /etc/supervisord.conf"
PIDFILE=/var/run/supervisord/supervisord.pid
#LOCKFILE=/var/lock/subsys/supervisord.pid
# Path to the supervisord binary
SUPERVISORD=/usr/local/bin/supervisord
# Path to the supervisorctl binary
SUPERVISORCTL=/usr/local/bin/supervisorctl
# How long should we wait before forcefully killing the supervisord process ?
#STOP_TIMEOUT=60
# Remove this if you manage number of open files in some other fashion
#ulimit -n 96000
EOB
mkdir -p /var/run/supervisord/
chown webapp: /var/run/supervisord/
cat <<'EOB' > /etc/supervisord.conf
[unix_http_server]
file=/tmp/supervisor.sock
chmod=0777
[supervisord]
logfile=/var/app/support/logs/supervisord.log
logfile_maxbytes=0
logfile_backups=0
loglevel=warn
pidfile=/var/run/supervisord/supervisord.pid
nodaemon=false
nocleanup=true
user=webapp
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[supervisorctl]
serverurl=unix:///tmp/supervisor.sock
[program:process-ipn-api-gpsfsoft]
command = -- command that u want to run ---
directory = /var/app/current/
user = webapp
autorestart = true
startsecs = 0
numprocs = 10
process_name = -- process name that u want ---
EOB
# this is now a little tricky, not officially documented, so might break but it is the cleanest solution
# first before the "flip" is done (e.g. switch between ondeck vs current) lets stop supervisord
echo -e '#!/usr/bin/env bash\nservice supervisord stop' > /opt/elasticbeanstalk/hooks/appdeploy/enact/00_stop_supervisord.sh
chmod +x /opt/elasticbeanstalk/hooks/appdeploy/enact/00_stop_supervisord.sh
# then right after the webserver is reloaded, we can start supervisord again
echo -e '#!/usr/bin/env bash\nservice supervisord start' > /opt/elasticbeanstalk/hooks/appdeploy/enact/99_z_start_supervisord.sh
chmod +x /opt/elasticbeanstalk/hooks/appdeploy/enact/99_z_start_supervisord.sh
fi
PS: You have to define SUPERVISE as enable in the Elastic Beanstalk environment properties to get this to run.
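For example, you can set that environment property from another .ebextensions config file (sketch; the file name is up to you), or with eb setenv SUPERVISE=enable from the EB CLI:
option_settings:
  aws:elasticbeanstalk:application:environment:
    SUPERVISE: enable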

How to show error on missing parameters for options?

I'm an absolute Clojure beginner and I'm trying to build a CLI app using the clojure.tools.cli library.
My problem is that I can't show any error when an option is not provided with its required parameter.
What I want:
$ java -jar test.jar -m SAMPLE
Given file: SAMPLE
$ java -jar test.jar -m
ERROR: Please provide a file
What happens:
$ java -jar test.jar -m SAMPLE
Given file: SAMPLE
$ java -jar test.jar -m
$
It doesn't show anything.
Here is my code:
(ns obmed-clj.core
  (:require [clojure.tools.cli :refer [parse-opts]])
  (:gen-class))

(def cli-options
  [["-m" "--menu FILE" "Provide menu file path"
    :parse-fn #(if (nil? %)
                 (println "ERROR: Please provide a file")
                 %)
    :validate-fn #(println "Given file:" %)]])

(defn -main [& args]
  (parse-opts args cli-options))
You are abusing the -fn arguments here a little. :parse-fn is there to convert the string argument (in your case the FILE in "--menu FILE"), and validation of the parsed value belongs in :validate [fn msg] instead. So e.g.:
user=> (def cli-opts [["-m" "--menu FILE" "menu file"
                       :parse-fn #(java.io.File. %)
                       :validate [#(.exists %) "file must exist"]]])
#'user/cli-opts
Missing argument:
user=> (parse-opts ["-m"] cli-opts)
{:arguments [],
:errors ["Missing required argument for \"-m FILE\""],
:options {},
:summary " -m, --menu FILE menu file"}
File not existing:
user=> (parse-opts ["-m" "XXX"] cli-opts)
{:arguments [],
:errors ["Failed to validate \"-m XXX\": file must exist"],
:options {},
:summary " -m, --menu FILE menu file"}
All is well:
user=> (parse-opts ["-m" "/etc/hosts"] cli-opts)
{:arguments [],
:errors nil,
:options {:menu #<java.io.File#34d63c80 /etc/hosts>},
:summary " -m, --menu FILE menu file"}

StrongLoop API Explorer Refresh

I've set up StrongLoop on an EC2 instance.
Everything is running well. I can access the API explorer.
I use StrongLoop Arc's composer to discover models in the local MySQL db and make them public. I can see the exposed models in the model-config.json file in my app's server folder.
But the explorer is not refreshing, so I can't see the new models in the explorer. The only solution I've found is to reboot the whole server, but I can't imagine this is the only option. Does someone have a clue?
Thanks,
So the easiest solution I found is to kill the process and then relaunch using the following command:
nohup service slc-initd start
Noting that my slc-initd is the following script in my init.d folder (no credit to me for this script):
#!/usr/bin/env bash
# chkconfig: 345 99 01
# description: startup of slc loopback
NAME="Init.d SLC"
NODE_BIN_DIR="/usr/bin"
NODE_PATH="/usr/lib/node_modules"
APPLICATION_DIRECTORY="/home/ec2-user/dev/mpos"
#APPLICATION_START="src/cluster-worker.js"
PIDFILE="/var/run/initd-example.pid"
LOGFILE="/var/log/slc-initd.log"
start() {
echo "Starting $NAME"
echo "cd $APPLICATION_DIRECTORY"
cd $APPLICATION_DIRECTORY
echo "slc run --pid $PIDFILE --log $LOGFILE"
slc run --pid $PIDFILE --log $LOGFILE
RETVAL=$?
}
stop() {
if [ -f $PIDFILE ]; then
echo "Shutting down $NAME"
echo "cd $APPLICATION_DIRECTORY"
cd $APPLICATION_DIRECTORY
echo "slc runctl stop"
slc runctl stop
# No need to get rid of the pidfile, slc does that for us.
RETVAL=$?
else
echo "$NAME is not running."
RETVAL=0
fi
}
restart() {
if [ -f $PIDFILE ]; then
echo "Restarting $NAME"
echo "cd $APPLICATION_DIRECTORY"
cd $APPLICATION_DIRECTORY
echo "slc runctl restart"
slc runctl restart
else
echo "$NAME isn't currently running. Starting from scratch ..."
start
fi
}
status() {
echo "Status for $NAME:"
cd $APPLICATION_DIRECTORY
slc runctl status
RETVAL=$?
}
case "$1" in
start)
start
;;
stop)
stop
;;
status)
status
;;
restart)
restart
;;
*)
echo "Usage: {start|stop|status|restart}"
exit 1
;;
esac
exit $RETVAL
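With that script installed in /etc/init.d, restarting instead of rebooting should be enough to pick up model-config.json changes (sketch, using the script's restart branch, which calls slc runctl restart):
$ sudo service slc-initd restart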

Vagrant VM not getting Django and other requirements

I'm using Vagrant and Chef Solo to set up my Django dev environment. Using Chef Solo I successfully install my packages (vim, git, apt, python, mysql), but when I then set up my project using pip to download/install my requirements (django, south, django-registration, etc.), these are not correctly downloaded/found in my fresh VM.
I'm not sure if it's a location issue, but it's downloading and I get only warnings, never errors, and yet nothing ends up at the expected location (I have another project set up exactly the same way and it works, so maybe I'm missing something here...).
Here is my Vagrantfile:
Vagrant::Config.run do |config|
  config.vm.define :djangovm do |django_config|
    # Every Vagrant virtual environment requires a box to build off of.
    django_config.vm.box = "lucid64"

    # The url from where the 'config.vm.box' box will be fetched if it
    # doesn't already exist on the user's system.
    django_config.vm.box_url = "http://files.vagrantup.com/lucid64.box"

    # Forward a port from the guest to the host, which allows for outside
    # computers to access the VM, whereas host only networking does not.
    django_config.vm.forward_port 80, 8080
    django_config.vm.forward_port 8000, 8001

    # Enable provisioning with chef solo, specifying a cookbooks path (relative
    # to this Vagrantfile), and adding some recipes and/or roles.
    django_config.vm.provision :chef_solo do |chef|
      chef.json = {
        python: {
          install_method: 'source',
          version: '2.7.5',
          checksum: 'b4f01a1d0ba0b46b05c73b2ac909b1df'
        },
        mysql: {
          server_root_password: 'root',
          server_debian_password: 'root',
          server_repl_password: 'root'
        },
      }
      chef.cookbooks_path = "vagrant_resources/cookbooks"
      chef.add_recipe "apt"
      chef.add_recipe "build-essential"
      chef.add_recipe "git"
      chef.add_recipe "vim"
      chef.add_recipe "openssl"
      chef.add_recipe "mysql::client"
      chef.add_recipe "mysql::server"
      chef.add_recipe "python"
    end

    django_config.vm.provision :shell, :path => "vagrant_resources/vagrant_bootstrap.sh"
  end
end
And here is the bootstrap file that downloads Django and continues setting things up:
#!/usr/bin/env bash
eval vagrantfile_location="~/.vagrantfile_processed"
if [ -f $vagrantfile_location ]; then
echo "Vagrantfile already processed. Exiting..."
exit 0
fi
#==================================================================
# install dependencies
#==================================================================
/usr/bin/yes | pip install --upgrade pip
/usr/bin/yes | pip install --upgrade virtualenv
/usr/bin/yes | sudo apt-get install python-software-properties
#==================================================================
# set up the local dev environment
#==================================================================
if [ -f "/home/vagrant/.bash_profile" ]; then
echo -n "removing .bash_profile for user vagrant..."
rm /home/vagrant/.bash_profile
echo "done!"
fi
echo -n "creating new .bash_profile for user vagrant..."
ln -s /vagrant/.bash_profile /home/vagrant/.bash_profile
source /home/vagrant/.bash_profile
echo "done!"
#==================================================================
# set up virtual env
#==================================================================
cd /vagrant;
echo -n "Creating virtualenv..."
virtualenv myquivers;
echo "done!"
echo -n "Activating virtualenv..."
source /vagrant/myquivers/bin/activate
echo "done!"
echo -n "installing project dependencies via pip..."
/usr/bin/yes | pip install -r /vagrant/myquivers/myquivers/requirements/dev.txt
echo "done!"
#==================================================================
# install front-endy things
#==================================================================
echo -n "adding node.js npm repo..."
add-apt-repository ppa:chris-lea/node.js &> /dev/null || exit 1
echo "done!"
echo -n "calling apt-get update..."
apt-get update &> /dev/null || exit 1
echo "done!"
echo -n "nodejs and npm..."
apt-get install nodejs npm &> /dev/null || exit 1
echo "done!"
echo -n "installing grunt..."
npm install -g grunt-cli &> /dev/null || exit 1
echo "done!"
echo -n "installing LESS..."
npm install -g less &> /dev/null || exit 1
echo "done!"
echo -n "installing uglify.js..."
npm install -g uglify-js &> /dev/null || exit 1
echo "done!"
#==================================================================
# cleanup
#==================================================================
echo -n "marking vagrant as processed..."
touch $vagrantfile_location
echo "done!"
My requirements dev.txt looks like this:
Django==1.5.1
Fabric==1.7.0
South==0.8.2
Pillow==2.1.0
django-less==0.7.2
paramiko==1.11.0
psycopg2==2.5.1
pycrypto==2.6
wsgiref==0.1.2
django-registration==1.0
Any idea why I can't find Django and my other things in my VM?
This is a whole 'nother path, but I highly recommend using Berkshelf and doing it the Berkshelf way. There's a great guide online for rolling them this way.
That is, create a cookbook as a wrapper that will do everything your script does.
So the solution was to remove the PostgreSQL dependency psycopg2==2.5.1 from my requirements (left over from the setup in my other project), because here I'll be using a MySQL database instead.
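For reference, the only change to dev.txt is dropping the psycopg2 line; if the project also needs a MySQL driver inside the VM, a pin along these lines is one option (the exact package and version here are an assumption for illustration, not part of my original setup):
MySQL-python==1.2.4  # assumed MySQL driver, replacing the removed psycopg2==2.5.1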