Run script as specified user - clojure

I'm trying to run whoami on my local machine as user pallet and it seems to keep wanting to run as user deadghost.
(ns localhost.idk
  (:require (pallet [compute :as compute]
                    [api :as api]
                    [actions :as actions])))

(def my-data-center
  (compute/instantiate-provider
   "node-list"
   :node-list [["localhost" "whoami" "127.0.0.1" :ubuntu]]))

(def user-pallet
  (api/make-user "pallet"
                 :password "pallet"
                 :sudo-password nil
                 :public-key-path "/home/deadghost/.ssh/id_rsa.pub"
                 :private-key-path "/home/deadghost/.ssh/id_rsa"
                 :passphrase "my-ssh-passphrase-here"))

(pallet.api/lift
 (pallet.api/group-spec
  "whoami"
  :phases {:configure (api/plan-fn
                        (pallet.actions/exec-script
                         ("whoami")))})
 :compute my-data-center
 :user user-pallet)
Return value of lift. Output is deadghost:
{:initial-plan-state {:pallet.action/action-options {}},
:environment
{:compute #<NodeList pallet.compute.node_list.NodeList#b63e98>,
:user
{:username "pallet",
:public-key-path "/home/deadghost/.ssh/id_rsa.pub",
:private-key-path "/home/deadghost/.ssh/id_rsa",
:public-key nil,
:private-key nil,
:passphrase "my-ssh-passphrase-here",
:password "pallet",
:sudo-password nil,
:no-sudo nil,
:sudo-user nil,
:state-root nil,
:state-group nil}},
:results
({:target
{:group-name :whoami,
:default-phases [:configure],
:phases
{:pallet/os
#<api$lift_STAR_$fn__14140 pallet.api$lift_STAR_$fn__14140#1db467f>,
:configure
#<idk$eval14723$fn__14725 localhost.idk$eval14723$fn__14725#baacb>},
:group-names #{:whoami},
:node
{:name "localhost",
:group-name "whoami",
:ip "127.0.0.1",
:os-family :ubuntu,
:os-version nil,
:id "localhost-127-0-0-1",
:ssh-port 22,
:private-ip nil,
:is-64bit true,
:running true,
:service #<NodeList pallet.compute.node_list.NodeList#b63e98>,
:hardware nil,
:proxy nil,
:image-user nil}},
:target-type nil,
:plan-state
{:host
{"localhost-127-0-0-1"
{:pallet/os
{nil
{:context "os: ",
:action-symbol clj-action13784,
:id "Ubuntu",
:release "14.04",
:mach "i686",
:rev "3.13.0-39-generic",
:os "Linux",
:os-family :ubuntu,
:os-version "14.04"}}}},
:pallet.action/action-options {}},
:result
({:err "",
:out "pallet:x:1001:1001:,,,:/home/pallet:/bin/bash\n",
:exit 0,
:flags #{},
:flag-values {},
:script
"#!/usr/bin/env bash\nmkdir -p /home/deadghost || exit 1\ncd /home/deadghost\nset -h\nif getent passwd pallet; then :;else /usr/sbin/useradd pallet;fi\nexit $?",
:action-symbol pallet.actions/user,
:context nil}
{:err "",
:out "deadghost\n", <------------ OUTPUTS DEADGHOST INSTEAD OF PALLET
:exit 0,
:flags #{},
:flag-values {},
:script
"#!/usr/bin/env bash\nmkdir -p /home/deadghost || exit 1\ncd /home/deadghost\nset -h\nwhoami\nexit $?",
:action-symbol pallet.actions/exec-script*,
:context nil}),
:phase :configure}),
:targets
({:group-name :whoami,
:default-phases [:configure],
:phases
{:pallet/os
#<api$lift_STAR_$fn__14140 pallet.api$lift_STAR_$fn__14140#1db467f>,
:configure
#<idk$eval14723$fn__14725 localhost.idk$eval14723$fn__14725#baacb>},
:group-names #{:whoami},
:node
{:name "localhost",
:group-name "whoami",
:ip "127.0.0.1",
:os-family :ubuntu,
:os-version nil,
:id "localhost-127-0-0-1",
:ssh-port 22,
:private-ip nil,
:is-64bit true,
:running true,
:service #<NodeList pallet.compute.node_list.NodeList#b63e98>,
:hardware nil,
:proxy nil,
:image-user nil}}),
:plan-state
{:node-values
{nv14946
{:err "",
:out "pallet:x:1001:1001:,,,:/home/pallet:/bin/bash\n",
:exit 0,
:flags #{},
:flag-values {},
:script
"#!/usr/bin/env bash\nmkdir -p /home/deadghost || exit 1\ncd /home/deadghost\nset -h\nif getent passwd pallet; then :;else /usr/sbin/useradd pallet;fi\nexit $?",
:action-symbol pallet.actions/user,
:context nil},
nv14945
{:err "",
:out "deadghost\n",
:exit 0,
:flags #{},
:flag-values {},
:script
"#!/usr/bin/env bash\nmkdir -p /home/deadghost || exit 1\ncd /home/deadghost\nset -h\nwhoami\nexit $?",
:action-symbol pallet.actions/exec-script*,
:context nil},
nv14933
{:id "Ubuntu",
:release "14.04",
:mach "i686",
:rev "3.13.0-39-generic",
:os "Linux",
:os-family :ubuntu,
:os-version "14.04",
:action-symbol clj-action13784,
:context "os: "},
nv14932
{:os-version "14.04",
:os-family :ubuntu,
:os "Linux",
:rev "3.13.0-39-generic",
:mach "i686",
:release "14.04",
:id "Ubuntu",
:action-symbol clj-action13784,
:context "os: "},
nv14931
{:release "14.04",
:id "Ubuntu",
:os-family :ubuntu,
:os-version "14.04",
:action-symbol clj-action13760,
:context "os: infer-distro: "},
nv14930
{:err "",
:out "{\n:id \"Ubuntu\"\n:release \"14.04\"\n}\n",
:exit 0,
:flags #{},
:flag-values {},
:script
"#!/usr/bin/env bash\nmkdir -p /home/deadghost || exit 1\ncd /home/deadghost\nset -h\nif [ -e /etc/debconf_version ]; then\nID=$(cat /etc/redhat-release | egrep -o -e '^[A-Za-z ]+release' | sed -e 's/ release//')\nRELEASE=$(lsb_release -s -r)\nfi\nif [ -e /etc/lsb-release ]; then\nsource /etc/lsb-release\nID=${DISTRIB_ID}\nRELEASE=${DISTRIB_RELEASE}\nfi\nif [ -e /etc/redhat-release ]; then\nID=$(cat /etc/redhat-release | egrep -o -e '^[A-Za-z ]+release' | sed -e 's/ release//')\nRELEASE=$(cat /etc/redhat-release | sed -e 's/.*release//' | sed -e 's/[^0-9.]//g')\nfi\nif [ -e /etc/SUSE-release ]; then\nID=$(cat /etc/SUSE-release | tr ' ' | sed -e 's/VERSION.*//')\nRELEASE=$(cat /etc/SUSE-release | tr ' ' | sed -e 's/.*= //')\nfi\nif [ -e /etc/mandrake-release ]; then\nID=Mandrake\nRELEASE=$(cat /etc/mandrake-release | sed -e 's/.*release //' | sed -e 's/ .*//')\nfi\necho {\necho :id '\"'${ID:-unknown}'\"'\necho :release '\"'${RELEASE:-unknown}'\"'\necho }\n\nexit $?",
:action-symbol pallet.actions/exec-script*,
:context "os: infer-distro: "},
nv14929
{:mach "i686",
:rev "3.13.0-39-generic",
:os "Linux",
:os-family :linux,
:os-version "3.13.0-39-generic",
:action-symbol clj-action13735,
:context "os: infer-os: "},
nv14928
{:err "",
:out
"{\n:os \"Linux\"\n:rev \"3.13.0-39-generic\"\n:mach \"i686\"\n}\n",
:exit 0,
:flags #{},
:flag-values {},
:script
"#!/usr/bin/env bash\nmkdir -p /home/deadghost || exit 1\ncd /home/deadghost\nset -h\necho {\necho :os '\"'$(uname -s)'\"'\necho :rev '\"'$(uname -r)'\"'\necho :mach '\"'$(uname -m)'\"'\necho }\n\nexit $?",
:action-symbol pallet.actions/exec-script*,
:context "os: infer-os: "}},
:host
{"localhost-127-0-0-1"
{:pallet/os
{nil
{:context "os: ",
:action-symbol clj-action13784,
:id "Ubuntu",
:release "14.04",
:mach "i686",
:rev "3.13.0-39-generic",
:os "Linux",
:os-family :ubuntu,
:os-version "14.04"}}}},
:pallet.action/action-options {}}}
Taken from pallet.log:
2014-11-13 08:08:04,333 DEBUG [operate-92] p.c.operations lift :phases [:pallet/os :settings] :targets [:ed]
2014-11-13 08:08:04,342 DEBUG [operate-94] p.c.primitives build-and-execute-phase :pallet/os on 1 target(s)
2014-11-13 08:08:04,342 DEBUG [operate-94] p.c.api-impl target-os-details node {:packager :apt, :os-family :ubuntu} detected {}
2014-11-13 08:08:04,342 DEBUG [operate-94] p.c.api-impl Script context: [:ubuntu :apt]
2014-11-13 08:08:04,345 DEBUG [operate-94] p.core.api environment-execution-settings {:compute #<NodeList pallet.compute.node_list.NodeList#1ac4ccd>, :user #pallet.core.user.User{:username "pallet", :public-key-path "/home/deadghost/.ssh/id_rsa.pub", :private-key-path "/home/deadghost/.ssh/id_rsa", :public-key nil, :private-key nil, :passphrase "my-ssh-passphrase-here", :password "pallet", :sudo-password nil, :no-sudo nil, :sudo-user nil, :state-root nil, :state-group nil}}
2014-11-13 08:08:04,345 DEBUG [operate-94] p.core.api Env user pallet.core.user.User#d8ec4e52
2014-11-13 08:08:04,346 DEBUG [operate-94] p.c.api-impl target-os-details node {:packager :apt, :os-family :ubuntu} detected {}
2014-11-13 08:08:04,346 DEBUG [operate-94] p.c.api-impl Script context: [:ubuntu :apt]
2014-11-13 08:08:04,347 DEBUG [operate-94] p.l.execute localhost ==> -----------------------------------------
2014-11-13 08:08:04,347 DEBUG [operate-94] p.l.execute localhost ==> #!/usr/bin/env bash
2014-11-13 08:08:04,347 DEBUG [operate-94] p.l.execute localhost ==> mkdir -p /home/deadghost || exit 1
2014-11-13 08:08:04,347 DEBUG [operate-94] p.l.execute localhost ==> cd /home/deadghost
2014-11-13 08:08:04,347 DEBUG [operate-94] p.l.execute localhost ==> set -h
2014-11-13 08:08:04,347 DEBUG [operate-94] p.l.execute localhost ==> echo {
2014-11-13 08:08:04,348 DEBUG [operate-94] p.l.execute localhost ==> echo :os '"'$(uname -s)'"'
2014-11-13 08:08:04,348 DEBUG [operate-94] p.l.execute localhost ==> echo :rev '"'$(uname -r)'"'
2014-11-13 08:08:04,348 DEBUG [operate-94] p.l.execute localhost ==> echo :mach '"'$(uname -m)'"'
2014-11-13 08:08:04,348 DEBUG [operate-94] p.l.execute localhost ==> echo }
2014-11-13 08:08:04,348 DEBUG [operate-94] p.l.execute localhost ==>
2014-11-13 08:08:04,348 DEBUG [operate-94] p.l.execute localhost ==> exit $?
2014-11-13 08:08:04,348 DEBUG [operate-94] p.l.execute localhost ==> ------------------------------------------
2014-11-13 08:08:04,358 DEBUG [operate-94] p.l.execute localhost <== ----------------------------------------
2014-11-13 08:08:04,358 DEBUG [operate-94] p.script-builder {:script-dir "/home/deadghost", :default-script-prefix :no-sudo}
2014-11-13 08:08:04,358 DEBUG [operate-94] p.script-builder prefix kw :no-sudo
2014-11-13 08:08:04,358 DEBUG [operate-94] p.l.execute localhost {:env-cmd "/usr/bin/env", :env nil, :env-fwd [:SSH_AUTH_SOCK], :prefix nil, :execv ("/bin/bash" "/tmp/pallet8828719639020449641script")}
2014-11-13 08:08:05,360 DEBUG [operate-94] p.execute localhost <== {
2014-11-13 08:08:05,360 DEBUG [operate-94] p.execute localhost <== :os "Linux"
2014-11-13 08:08:05,360 DEBUG [operate-94] p.execute localhost <== :rev "3.13.0-39-generic"
2014-11-13 08:08:05,360 DEBUG [operate-94] p.execute localhost <== :mach "i686"
2014-11-13 08:08:05,360 DEBUG [operate-94] p.execute localhost <== }
2014-11-13 08:08:05,361 DEBUG [operate-94] p.l.execute localhost <== ----------------------------------------
2014-11-13 08:08:05,362 DEBUG [operate-94] p.l.execute clojure-on-origin pallet.crate.os$infer_os$action__4279__auto___direct__13736$clj_action13735__13737#15798c9
2014-11-13 08:08:05,367 DEBUG [operate-94] p.l.execute localhost ==> -----------------------------------------
2014-11-13 08:08:05,367 DEBUG [operate-94] p.l.execute localhost ==> #!/usr/bin/env bash
2014-11-13 08:08:05,367 DEBUG [operate-94] p.l.execute localhost ==> mkdir -p /home/deadghost || exit 1
2014-11-13 08:08:05,367 DEBUG [operate-94] p.l.execute localhost ==> cd /home/deadghost
2014-11-13 08:08:05,367 DEBUG [operate-94] p.l.execute localhost ==> set -h
2014-11-13 08:08:05,367 DEBUG [operate-94] p.l.execute localhost ==> if [ -e /etc/debconf_version ]; then
2014-11-13 08:08:05,367 DEBUG [operate-94] p.l.execute localhost ==> ID=$(cat /etc/redhat-release | egrep -o -e '^[A-Za-z ]+release' | sed -e 's/ release//')
2014-11-13 08:08:05,367 DEBUG [operate-94] p.l.execute localhost ==> RELEASE=$(lsb_release -s -r)
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> fi
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> if [ -e /etc/lsb-release ]; then
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> source /etc/lsb-release
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> ID=${DISTRIB_ID}
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> RELEASE=${DISTRIB_RELEASE}
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> fi
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> if [ -e /etc/redhat-release ]; then
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> ID=$(cat /etc/redhat-release | egrep -o -e '^[A-Za-z ]+release' | sed -e 's/ release//')
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> RELEASE=$(cat /etc/redhat-release | sed -e 's/.*release//' | sed -e 's/[^0-9.]//g')
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> fi
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> if [ -e /etc/SUSE-release ]; then
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> ID=$(cat /etc/SUSE-release | tr ' ' | sed -e 's/VERSION.*//')
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> RELEASE=$(cat /etc/SUSE-release | tr ' ' | sed -e 's/.*= //')
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> fi
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> if [ -e /etc/mandrake-release ]; then
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> ID=Mandrake
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> RELEASE=$(cat /etc/mandrake-release | sed -e 's/.*release //' | sed -e 's/ .*//')
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> fi
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> echo {
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> echo :id '"'${ID:-unknown}'"'
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> echo :release '"'${RELEASE:-unknown}'"'
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> echo }
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==>
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> exit $?
2014-11-13 08:08:05,368 DEBUG [operate-94] p.l.execute localhost ==> ------------------------------------------
2014-11-13 08:08:05,371 DEBUG [operate-94] p.l.execute localhost <== ----------------------------------------
2014-11-13 08:08:05,371 DEBUG [operate-94] p.script-builder {:script-dir "/home/deadghost", :default-script-prefix :no-sudo}
2014-11-13 08:08:05,371 DEBUG [operate-94] p.script-builder prefix kw :no-sudo
2014-11-13 08:08:05,372 DEBUG [operate-94] p.l.execute localhost {:env-cmd "/usr/bin/env", :env nil, :env-fwd [:SSH_AUTH_SOCK], :prefix nil, :execv ("/bin/bash" "/tmp/pallet4201262849437841643script")}
2014-11-13 08:08:06,373 DEBUG [operate-94] p.execute localhost <== {
2014-11-13 08:08:06,373 DEBUG [operate-94] p.execute localhost <== :id "Ubuntu"
2014-11-13 08:08:06,374 DEBUG [operate-94] p.execute localhost <== :release "14.04"
2014-11-13 08:08:06,374 DEBUG [operate-94] p.execute localhost <== }
2014-11-13 08:08:06,375 DEBUG [operate-94] p.l.execute localhost <== ----------------------------------------
2014-11-13 08:08:06,375 DEBUG [operate-94] p.l.execute clojure-on-origin pallet.crate.os$infer_distro$action__4279__auto___direct__13761$clj_action13760__13762#1c412ce
2014-11-13 08:08:06,375 DEBUG [operate-94] p.l.execute clojure-on-origin pallet.crate.os$os$action__4279__auto___direct__13785$clj_action13784__13786#1aebe5f
2014-11-13 08:08:06,376 DEBUG [operate-94] p.l.execute clojure-on-origin pallet.actions.direct.settings$eval12391$assoc_settings_direct__12393$fn__12395#1546d95
2014-11-13 08:08:06,386 DEBUG [operate-93] p.c.primitives build-and-execute-phase :settings on 1 target(s)
2014-11-13 08:08:06,397 DEBUG [operate-94] p.c.operations lift-partitions :phases [:configure] :targets [:ed]
2014-11-13 08:08:06,400 DEBUG [operate-93] p.c.operations lift :phases [:configure] :targets [:ed]
2014-11-13 08:08:06,404 DEBUG [operate-94] p.c.primitives build-and-execute-phase :configure on 1 target(s)
2014-11-13 08:08:06,405 DEBUG [operate-94] p.c.api-impl target-os-details node {:packager :apt, :os-family :ubuntu} detected {:os-version "14.04", :os-family :ubuntu}
2014-11-13 08:08:06,405 DEBUG [operate-94] p.c.api-impl Script context: [:ubuntu :apt :ubuntu-14.04]
2014-11-13 08:08:06,412 DEBUG [operate-92] p.core.api environment-execution-settings {:compute #<NodeList pallet.compute.node_list.NodeList#1ac4ccd>, :user #pallet.core.user.User{:username "pallet", :public-key-path "/home/deadghost/.ssh/id_rsa.pub", :private-key-path "/home/deadghost/.ssh/id_rsa", :public-key nil, :private-key nil, :passphrase "my-ssh-passphrase-here", :password "pallet", :sudo-password nil, :no-sudo nil, :sudo-user nil, :state-root nil, :state-group nil}}
2014-11-13 08:08:06,413 DEBUG [operate-92] p.core.api Env user pallet.core.user.User#d8ec4e52
2014-11-13 08:08:06,415 DEBUG [operate-92] p.c.api-impl target-os-details node {:packager :apt, :os-family :ubuntu} detected {:os-version "14.04", :os-family :ubuntu}
2014-11-13 08:08:06,415 DEBUG [operate-92] p.c.api-impl Script context: [:ubuntu :apt :ubuntu-14.04]
2014-11-13 08:08:06,416 DEBUG [operate-92] p.l.execute localhost ==> -----------------------------------------
2014-11-13 08:08:06,417 DEBUG [operate-92] p.l.execute localhost ==> #!/usr/bin/env bash
2014-11-13 08:08:06,417 DEBUG [operate-92] p.l.execute localhost ==> mkdir -p /home/deadghost || exit 1
2014-11-13 08:08:06,417 DEBUG [operate-92] p.l.execute localhost ==> cd /home/deadghost
2014-11-13 08:08:06,417 DEBUG [operate-92] p.l.execute localhost ==> set -h
2014-11-13 08:08:06,417 DEBUG [operate-92] p.l.execute localhost ==> whoami
2014-11-13 08:08:06,417 DEBUG [operate-92] p.l.execute localhost ==> exit $?
2014-11-13 08:08:06,417 DEBUG [operate-92] p.l.execute localhost ==> ------------------------------------------
2014-11-13 08:08:06,421 DEBUG [operate-92] p.l.execute localhost <== ----------------------------------------
2014-11-13 08:08:06,421 DEBUG [operate-92] p.script-builder {:script-dir "/home/deadghost", :default-script-prefix :no-sudo}
2014-11-13 08:08:06,421 DEBUG [operate-92] p.script-builder prefix kw :no-sudo
2014-11-13 08:08:06,421 DEBUG [operate-92] p.l.execute localhost {:env-cmd "/usr/bin/env", :env nil, :env-fwd [:SSH_AUTH_SOCK], :prefix nil, :execv ("/bin/bash" "/tmp/pallet3318318864438163863script")}
2014-11-13 08:08:07,427 DEBUG [operate-92] p.execute localhost <== deadghost
2014-11-13 08:08:07,428 DEBUG [operate-92] p.l.execute localhost <== ----------------------------------------

I'm not sure what your example shows and what the question is...
Why do you try to create a user from your pallet user (I mean "(actions/user (:username user-pallet))")?
Please remove it to make your question clearer.
Does the pallet user exist on your localhost?
Is it able to sudo?
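You can check both directly on localhost with something like:
getent passwd pallet
sudo -l -U pallet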
What does the console output show?
By console output I mean something like:
20:57:56.305 [operate-57] DEBUG pallet.ssh.execute - authentication {:sudo-user nil, :state-group nil, :private-key nil, :public-key nil, :sudo-password "*******", :private-key-path nil, :no-sudo false, :public-key-path nil, :passphrase nil, :username "initial", :state-root nil, :password "*******"}
20:57:56.305 [operate-57] DEBUG pallet.ssh.transport - SSH user initial :private-key-path null :private-key null :password *******
20:57:56.407 [operate-57] DEBUG pallet.ssh.execute - 192.168.35.15 ==> -----------------------------------------
20:57:56.407 [operate-57] DEBUG pallet.ssh.execute - 192.168.35.15 ==> #!/usr/bin/env bash
20:57:56.407 [operate-57] DEBUG pallet.ssh.execute - 192.168.35.15 ==> set -h
20:57:56.407 [operate-57] DEBUG pallet.ssh.execute - 192.168.35.15 ==> echo {
20:57:56.407 [operate-57] DEBUG pallet.ssh.execute - 192.168.35.15 ==> echo :os '"'$(uname -s)'"'
20:57:56.407 [operate-57] DEBUG pallet.ssh.execute - 192.168.35.15 ==> echo :rev '"'$(uname -r)'"'
20:57:56.407 [operate-57] DEBUG pallet.ssh.execute - 192.168.35.15 ==> echo :mach '"'$(uname -m)'"'
20:57:56.407 [operate-57] DEBUG pallet.ssh.execute - 192.168.35.15 ==> echo }
20:57:56.407 [operate-57] DEBUG pallet.ssh.execute - 192.168.35.15 ==>
20:57:56.407 [operate-57] DEBUG pallet.ssh.execute - 192.168.35.15 ==> exit $?
20:57:56.407 [operate-57] DEBUG pallet.ssh.execute - 192.168.35.15 ==> ------------------------------------------
20:57:56.407 [operate-57] DEBUG pallet.ssh.execute - 192.168.35.15:22 send script via /tmp/palletxhoLT as root
20:57:56.407 [operate-57] DEBUG pallet.ssh.execute - 192.168.35.15 <== ----------------------------------------
20:57:56.409 [operate-57] DEBUG pallet.ssh.transport - send-text set mode /tmp/palletxhoLT 384
20:57:56.411 [operate-57] DEBUG pallet.script-builder - {}
20:57:56.411 [operate-57] DEBUG pallet.script-builder - prefix kw :sudo
20:57:56.412 [operate-57] DEBUG pallet.script-builder - prefix sudo {:sudo-user nil, :node-value-path nv20675, :state-group nil, :context ("os" "infer-os"), :private-key nil, :public-key nil, :sudo-password "test123", :private-key-path nil, :args ("echo {\necho :os '\"'$(uname -s)'\"'\necho :rev '\"'$(uname -r)'\"'\necho :mach '\"'$(uname -m)'\"'\necho }\n"), :no-sudo false, :action {:action-symbol pallet.actions/exec-script*, :impls #<Atom#679f2b31: {:direct {:f #<exec_script$eval11637$exec_script_STAR__direct__11638 pallet.actions.direct.exec_script$eval11637$exec_script_STAR__direct__11638#705e0e14>, :metadata {:location :target, :action-type :script}}}>, :execution :in-sequence, :precedence {}}, :public-key-path nil, :passphrase nil, :username "initial", :state-root nil, :password "test123"}
20:57:56.412 [operate-57] DEBUG pallet.script-builder - sudo-cmd-for {:username "initial", :sudo-password "test123", :sudo-user nil, :password "test123", :no-sudo false}
20:57:56.412 [operate-57] DEBUG pallet.script-builder - prefix echo 'test123' | /usr/bin/sudo -S
20:57:56.412 [operate-57] DEBUG pallet.ssh.execute - ssh-script-on-target command {:env-cmd "/usr/bin/env", :env nil, :env-fwd [:SSH_AUTH_SOCK], :prefix ["echo" "'test123'" "|" "/usr/bin/sudo" "-S"], :execv ("/bin/bash" "/tmp/palletxhoLT")}
20:57:56.514 [operate-57] DEBUG pallet.execute - 192.168.35.15 <== [sudo] password for initial: {
20:57:56.514 [operate-57] DEBUG pallet.execute - 192.168.35.15 <== :os "Linux"
20:57:56.514 [operate-57] DEBUG pallet.execute - 192.168.35.15 <== :rev "3.5.0-23-generic"
20:57:56.514 [operate-57] DEBUG pallet.execute - 192.168.35.15 <== :mach "x86_64"
20:57:56.514 [operate-57] DEBUG pallet.execute - 192.168.35.15 <== }
20:57:56.616 [operate-57] DEBUG pallet.ssh.execute - 192.168.35.15 <== ----------------------------------------
20:57:56.616 [operate-57] DEBUG pallet.executors - default-executor

Related

When creating a directory with Ansible it doesn't appear

I have this simple Ansible task; I want to create a directory on the host:
- name: Create rails app dir
  file: path=/etc/rails-app state=directory mode=0755
  register: rails_app_dir
And these are the logs when I run the playbook:
TASK [instance_deploy_app : Create rails app dir] *************************************************************************************************
task path: /etc/ansible/roles/instance_deploy_app/tasks/main.yml:39
<IPv4 of host> ESTABLISH LOCAL CONNECTION FOR USER: root
<IPv4 of host> EXEC /bin/sh -c 'echo ~root && sleep 0'
<IPv4 of host> EXEC /bin/sh -c '( umask 77 && mkdir -p "` echo /root/.ansible/tmp `"&& mkdir "` echo /root/.ansible/tmp/ansible-tmp-1645566978.53-25820-207749605236297 `" && echo ansible-tmp-1645566978.53-25820-207749605236297="` echo /root/.ansible/tmp/ansible-tmp-1645566978.53-25820-207749605236297 `" ) && sleep 0'
Using module file /usr/lib/python2.7/site-packages/ansible/modules/files/file.py
<IPv4 of host> PUT /root/.ansible/tmp/ansible-local-25617Cg_rWo/tmpTPHs3p TO /root/.ansible/tmp/ansible-tmp-1645566978.53-25820-207749605236297/AnsiballZ_file.py
<IPv4 of host> EXEC /bin/sh -c 'chmod u+x /root/.ansible/tmp/ansible-tmp-1645566978.53-25820-207749605236297/ /root/.ansible/tmp/ansible-tmp-1645566978.53-25820-207749605236297/AnsiballZ_file.py && sleep 0'
<IPv4 of host> EXEC /bin/sh -c '/usr/bin/python /root/.ansible/tmp/ansible-tmp-1645566978.53-25820-207749605236297/AnsiballZ_file.py && sleep 0'
<IPv4 of host> EXEC /bin/sh -c 'rm -f -r /root/.ansible/tmp/ansible-tmp-1645566978.53-25820-207749605236297/ > /dev/null 2>&1 && sleep 0'
ok: [IPv4 of host] => {
"changed": false,
"diff": {
"after": {
"path": "/etc/rails-app"
},
"before": {
"path": "/etc/rails-app"
}
},
"gid": 0,
"group": "root",
"invocation": {
"module_args": {
"_diff_peek": null,
"_original_basename": null,
"access_time": null,
"access_time_format": "%Y%m%d%H%M.%S",
"attributes": null,
"backup": null,
"content": null,
"delimiter": null,
"directory_mode": null,
"follow": true,
"force": false,
"group": null,
"mode": "0755",
"modification_time": null,
"modification_time_format": "%Y%m%d%H%M.%S",
"owner": null,
"path": "/etc/rails-app",
"recurse": false,
"regexp": null,
"remote_src": null,
"selevel": null,
"serole": null,
"setype": null,
"seuser": null,
"src": null,
"state": "directory",
"unsafe_writes": null
}
},
"mode": "0755",
"owner": "root",
"path": "/etc/rails-app",
"size": 41,
"state": "directory",
"uid": 0
}
Read vars_file 'roles/instance_deploy_app/vars/instance_vars.yml'
Read vars_file 'roles/instance_deploy_app/vars/aws_cred.yml'
According to the logs, the directory should be there, but when I try to access /etc/rails-app/ it is not there. I currently have 3 users in the AWS EC2 instance: ec2-user, root and user1, and I checked all of them, but the directory doesn't appear.
Am I doing something wrong? Thanks!
As β.εηοιτ.βε suggested, the reason it was not creating the folder is that the playbook had connection: local, so it was "never connecting to my EC2 and always acting on my controller". Once I removed that, it worked.
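Roughly, the change was just dropping that one line from the play (the hosts pattern below is a stand-in for whatever the real playbook targets):
# before: connection: local makes tasks run on the controller,
# so /etc/rails-app showed up there instead of on the EC2 host
- hosts: all
  connection: local
  roles:
    - instance_deploy_app

# after: without connection: local, Ansible connects to the EC2 host
# over SSH and creates the directory there
- hosts: all
  roles:
    - instance_deploy_app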

Can't connect to PostgreSQL from a same-host container

I am using Docker to manage my Django apps, and have the same configuration on my laptop and on DigitalOcean:
From my laptop I can connect to PostgreSQL thanks to the Adminer image (https://hub.docker.com/_/adminer).
But if I try to connect to PostgreSQL from Adminer on localhost, I can't:
I can ping and find PostgreSQL from the django container:
But I can't migrate my database from the Django scripts:
Funnily enough, I can migrate on the DigitalOcean cloud from my laptop:
I can see the updated database on my laptop's Adminer page:
So the issue is obviously a networking issue between the containers... But if I can ping the service, why can't Django access it?
EDIT:
1° ip route :
ip route
default via 167.99.80.1 dev eth0 proto static
10.16.0.0/16 dev eth0 proto kernel scope link src 10.16.0.5
10.106.0.0/20 dev eth1 proto kernel scope link src 10.106.0.2
167.99.80.0/20 dev eth0 proto kernel scope link src 167.99.94.16
172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1
172.18.0.0/16 dev br-ec478ce025ee proto kernel scope link src 172.18.0.1 linkdown
2° iptables -S:
root@docker-s-1vcpu-1gb-lon1-01:~# apt install iptables
Reading package lists... Done
Building dependency tree
Reading state information... Done
iptables is already the newest version (1.8.4-3ubuntu2).
iptables set to manually installed.
0 upgraded, 0 newly installed, 0 to remove and 34 not upgraded.
root@docker-s-1vcpu-1gb-lon1-01:~# iptables -S
-P INPUT DROP
-P FORWARD DROP
-P OUTPUT ACCEPT
-N DOCKER
-N DOCKER-ISOLATION-STAGE-1
-N DOCKER-ISOLATION-STAGE-2
-N DOCKER-USER
-N ufw-after-forward
-N ufw-after-input
-N ufw-after-logging-forward
-N ufw-after-logging-input
-N ufw-after-logging-output
-N ufw-after-output
-N ufw-before-forward
-N ufw-before-input
-N ufw-before-logging-forward
-N ufw-before-logging-input
-N ufw-before-logging-output
-N ufw-before-output
-N ufw-logging-allow
-N ufw-logging-deny
-N ufw-not-local
-N ufw-reject-forward
-N ufw-reject-input
-N ufw-reject-output
-N ufw-skip-to-policy-forward
-N ufw-skip-to-policy-input
-N ufw-skip-to-policy-output
-N ufw-track-forward
-N ufw-track-input
-N ufw-track-output
-N ufw-user-forward
-N ufw-user-input
-N ufw-user-limit
-N ufw-user-limit-accept
-N ufw-user-logging-forward
-N ufw-user-logging-input
-N ufw-user-logging-output
-N ufw-user-output
-A INPUT -j ufw-before-logging-input
-A INPUT -j ufw-before-input
-A INPUT -j ufw-after-input
-A INPUT -j ufw-after-logging-input
-A INPUT -j ufw-reject-input
-A INPUT -j ufw-track-input
-A FORWARD -j DOCKER-USER
-A FORWARD -j DOCKER-ISOLATION-STAGE-1
-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -o docker0 -j DOCKER
-A FORWARD -i docker0 ! -o docker0 -j ACCEPT
-A FORWARD -i docker0 -o docker0 -j ACCEPT
-A FORWARD -o br-ec478ce025ee -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -o br-ec478ce025ee -j DOCKER
-A FORWARD -i br-ec478ce025ee ! -o br-ec478ce025ee -j ACCEPT
-A FORWARD -i br-ec478ce025ee -o br-ec478ce025ee -j ACCEPT
-A FORWARD -j ufw-before-logging-forward
-A FORWARD -j ufw-before-forward
-A FORWARD -j ufw-after-forward
-A FORWARD -j ufw-after-logging-forward
-A FORWARD -j ufw-reject-forward
-A FORWARD -j ufw-track-forward
-A OUTPUT -j ufw-before-logging-output
-A OUTPUT -j ufw-before-output
-A OUTPUT -j ufw-after-output
-A OUTPUT -j ufw-after-logging-output
-A OUTPUT -j ufw-reject-output
-A OUTPUT -j ufw-track-output
-A DOCKER -d 172.17.0.3/32 ! -i docker0 -o docker0 -p tcp -m tcp --dport 8080 -j ACCEPT
-A DOCKER -d 172.17.0.4/32 ! -i docker0 -o docker0 -p tcp -m tcp --dport 5432 -j ACCEPT
-A DOCKER -d 172.17.0.2/32 ! -i docker0 -o docker0 -p tcp -m tcp --dport 8000 -j ACCEPT
-A DOCKER -d 172.17.0.2/32 ! -i docker0 -o docker0 -p tcp -m tcp --dport 9000 -j ACCEPT
-A DOCKER -d 172.17.0.5/32 ! -i docker0 -o docker0 -p tcp -m tcp --dport 8080 -j ACCEPT
-A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-1 -i br-ec478ce025ee ! -o br-ec478ce025ee -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-1 -j RETURN
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
-A DOCKER-ISOLATION-STAGE-2 -o br-ec478ce025ee -j DROP
-A DOCKER-ISOLATION-STAGE-2 -j RETURN
-A DOCKER-USER -j RETURN
-A ufw-after-input -p udp -m udp --dport 137 -j ufw-skip-to-policy-input
-A ufw-after-input -p udp -m udp --dport 138 -j ufw-skip-to-policy-input
-A ufw-after-input -p tcp -m tcp --dport 139 -j ufw-skip-to-policy-input
-A ufw-after-input -p tcp -m tcp --dport 445 -j ufw-skip-to-policy-input
-A ufw-after-input -p udp -m udp --dport 67 -j ufw-skip-to-policy-input
-A ufw-after-input -p udp -m udp --dport 68 -j ufw-skip-to-policy-input
-A ufw-after-input -m addrtype --dst-type BROADCAST -j ufw-skip-to-policy-input
-A ufw-after-logging-input -m limit --limit 3/min --limit-burst 10 -j LOG --log-prefix "[UFW BLOCK] "
-A ufw-before-forward -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A ufw-before-forward -p icmp -m icmp --icmp-type 3 -j ACCEPT
-A ufw-before-forward -p icmp -m icmp --icmp-type 11 -j ACCEPT
-A ufw-before-forward -p icmp -m icmp --icmp-type 12 -j ACCEPT
-A ufw-before-forward -p icmp -m icmp --icmp-type 8 -j ACCEPT
-A ufw-before-forward -j ufw-user-forward
-A ufw-before-input -i lo -j ACCEPT
-A ufw-before-input -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A ufw-before-input -m conntrack --ctstate INVALID -j ufw-logging-deny
-A ufw-before-input -m conntrack --ctstate INVALID -j DROP
-A ufw-before-input -p icmp -m icmp --icmp-type 3 -j ACCEPT
-A ufw-before-input -p icmp -m icmp --icmp-type 11 -j ACCEPT
-A ufw-before-input -p icmp -m icmp --icmp-type 12 -j ACCEPT
-A ufw-before-input -p icmp -m icmp --icmp-type 8 -j ACCEPT
-A ufw-before-input -p udp -m udp --sport 67 --dport 68 -j ACCEPT
-A ufw-before-input -j ufw-not-local
-A ufw-before-input -d 224.0.0.251/32 -p udp -m udp --dport 5353 -j ACCEPT
-A ufw-before-input -d 239.255.255.250/32 -p udp -m udp --dport 1900 -j ACCEPT
-A ufw-before-input -j ufw-user-input
-A ufw-before-output -o lo -j ACCEPT
-A ufw-before-output -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A ufw-before-output -j ufw-user-output
-A ufw-logging-allow -m limit --limit 3/min --limit-burst 10 -j LOG --log-prefix "[UFW ALLOW] "
-A ufw-logging-deny -m conntrack --ctstate INVALID -m limit --limit 3/min --limit-burst 10 -j RETURN
-A ufw-logging-deny -m limit --limit 3/min --limit-burst 10 -j LOG --log-prefix "[UFW BLOCK] "
-A ufw-not-local -m addrtype --dst-type LOCAL -j RETURN
-A ufw-not-local -m addrtype --dst-type MULTICAST -j RETURN
-A ufw-not-local -m addrtype --dst-type BROADCAST -j RETURN
-A ufw-not-local -m limit --limit 3/min --limit-burst 10 -j ufw-logging-deny
-A ufw-not-local -j DROP
-A ufw-skip-to-policy-forward -j ACCEPT
-A ufw-skip-to-policy-input -j DROP
-A ufw-skip-to-policy-output -j ACCEPT
-A ufw-track-forward -p tcp -m conntrack --ctstate NEW -j ACCEPT
-A ufw-track-forward -p udp -m conntrack --ctstate NEW -j ACCEPT
-A ufw-track-output -p tcp -m conntrack --ctstate NEW -j ACCEPT
-A ufw-track-output -p udp -m conntrack --ctstate NEW -j ACCEPT
-A ufw-user-input -p tcp -m tcp --dport 22 -m conntrack --ctstate NEW -m recent --set --name DEFAULT --mask 255.255.255.255 --rsource
-A ufw-user-input -p tcp -m tcp --dport 22 -m conntrack --ctstate NEW -m recent --update --seconds 30 --hitcount 6 --name DEFAULT --mask 255.255.255.255 --rsource -j ufw-user-limit
-A ufw-user-input -p tcp -m tcp --dport 22 -j ufw-user-limit-accept
-A ufw-user-input -p tcp -m tcp --dport 2375 -j ACCEPT
-A ufw-user-input -p tcp -m tcp --dport 2376 -j ACCEPT
-A ufw-user-limit -m limit --limit 3/min -j LOG --log-prefix "[UFW LIMIT BLOCK] "
-A ufw-user-limit -j REJECT --reject-with icmp-port-unreachable
-A ufw-user-limit-accept -j ACCEPT
root@docker-s-1vcpu-1gb-lon1-01:~#
3°
root@docker-s-1vcpu-1gb-lon1-01:~# docker inspect django | tail -n51
"NetworkSettings": {
"Bridge": "",
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"Ports": {
"8000/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "56733"
}
],
"9000/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "9000"
}
]
},
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"Gateway": "172.17.0.1",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "172.17.0.2",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"MacAddress": "02:42:ac:11:00:02",
"Networks": {
"bridge": {
"IPAMConfig": null,
"Links": null,
"Aliases": null,
"NetworkID": "8eddd72be1915a2d0f5eb1a4812271debc4e4eca103800ede3511f3f4c56ae98",
"Gateway": "172.17.0.1",
"IPAddress": "172.17.0.2",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:ac:11:00:02",
"DriverOpts": null
}
}
}
}
]
root@docker-s-1vcpu-1gb-lon1-01:~# docker inspect nginx
[
{
"RepoTags": [
"nginx:latest"
],
"RepoDigests": [
],
"Parent": "",
"Comment": "",
"Created": "2020-11-18T07:48:35.319575714Z",
"Container": "7e8ca989e54001b9955974e36eb6d679ab4fe015066014645ef927fe88c326ec",
"ContainerConfig": {
"Hostname": "7e8ca989e540",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"80/tcp": {}
},
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"NGINX_VERSION=1.19.4",
"NJS_VERSION=0.4.4",
"PKG_RELEASE=1~buster"
],
"Cmd": [
"/bin/sh",
"-c",
"#(nop) ",
"CMD [\"nginx\" \"-g\" \"daemon off;\"]"
],
"Volumes": null,
"WorkingDir": "",
"Entrypoint": [
"/docker-entrypoint.sh"
],
"OnBuild": null,
"Labels": {
"maintainer": "NGINX Docker Maintainers <docker-maint#nginx.com>"
},
"StopSignal": "SIGTERM"
},
"DockerVersion": "19.03.12",
"Author": "",
"Config": {
"Hostname": "",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"80/tcp": {}
},
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"NGINX_VERSION=1.19.4",
"NJS_VERSION=0.4.4",
"PKG_RELEASE=1~buster"
],
"Cmd": [
"nginx",
"-g",
"daemon off;"
],
"Volumes": null,
"WorkingDir": "",
"Entrypoint": [
"/docker-entrypoint.sh"
],
"OnBuild": null,
"Labels": {
"maintainer": "NGINX Docker Maintainers <docker-maint#nginx.com>"
},
"StopSignal": "SIGTERM"
},
"Architecture": "amd64",
"Os": "linux",
"Size": 132890123,
"VirtualSize": 132890123,
"GraphDriver": {
"Data": {
},
"Name": "overlay2"
},
"RootFS": {
"Type": "layers",
"Layers": [
]
},
"Metadata": {
"LastTagTime": "0001-01-01T00:00:00Z"
}
}
]
root@docker-s-1vcpu-1gb-lon1-01:~# docker inspect postgreSQL
[
{
"Id": "c0e06b4a1fa410d0344e7b40fbc7b78308f70638affa65266357c8346570bf4e",
"Created": "2020-11-25T11:54:28.352080019Z",
"Path": "docker-entrypoint.sh",
"Args": [
"-c",
"config_file=/etc/postgresql/postgresql.conf"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 437388,
"ExitCode": 0,
"Error": "",
"StartedAt": "2020-11-25T11:54:28.93246511Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"ResolvConfPath": "Name": "/postgreSQL",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "docker-default",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/root/babymusic_django_server/postgreSql/appdata:/var/lib/postgresql/data/pgdata",
"/root/babymusic_django_server/postgreSql/my-postgres.conf:/etc/postgresql/postgresql.conf"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "default",
"PortBindings": {
"5432/tcp": [
{
"HostIp": "",
"HostPort": "5432"
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"BlkioDeviceReadIOps": null,
"BlkioDeviceWriteIOps": null,
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"KernelMemory": 0,
"KernelMemoryTCP": 0,
"MemoryReservation": 0,
"MemorySwap": 0,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": null,
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": [
"/proc/asound",
"/proc/acpi",
"/proc/kcore",
"/proc/keys",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/proc/scsi",
"/sys/firmware"
],
"ReadonlyPaths": [
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger"
]
},
"GraphDriver": {
"Data": {
"LowerDir":
},
"Name": "overlay2"
},
"Config": {
"Hostname": "c0e06b4a1fa4",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"5432/tcp": {}
},
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"POSTGRES_USER=romain",
"POSTGRES_DB=baby_music",
"PGDATA=/var/lib/postgresql/data/pgdata",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/lib/postgresql/13/bin",
"GOSU_VERSION=1.12",
"LANG=en_US.utf8",
"PG_MAJOR=13",
"PG_VERSION=13.1-1.pgdg100+1"
],
"Cmd": [
"-c",
"config_file=/etc/postgresql/postgresql.conf"
],
"Image": "postgres:13",
"Volumes": {
"/var/lib/postgresql/data": {}
},
"WorkingDir": "",
"Entrypoint": [
"docker-entrypoint.sh"
],
"OnBuild": null,
"Labels": {},
"StopSignal": "SIGINT"
},
"NetworkSettings": {
"Bridge": "",
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"Ports": {
"5432/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "5432"
}
]
},
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"Gateway": "172.17.0.1",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "172.17.0.4",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"MacAddress": "02:42:ac:11:00:04",
"Networks": {
"bridge": {
"IPAMConfig": null,
"Links": null,
"Aliases": null,
"NetworkID": "8eddd72be1915a2d0f5eb1a4812271debc4e4eca103800ede3511f3f4c56ae98",
"Gateway": "172.17.0.1",
"IPAddress": "172.17.0.4",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:ac:11:00:04",
"DriverOpts": null
}
}
}
}
]
root@docker-s-1vcpu-1gb-lon1-01:~#
root@docker-s-1vcpu-1gb-lon1-01:~# docker inspect PostGresqlAdmin
[
{
"Path": "entrypoint.sh",
"Args": [
"docker-php-entrypoint",
"php",
"-S",
"[::]:8080",
"-t",
"/var/www/html"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 454939,
"ExitCode": 0,
"Error": "",
"StartedAt": "2020-11-25T21:00:16.349310968Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"HostConfig": {
"Binds": null,
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "default",
"PortBindings": {
"8080/tcp": [
{
"HostIp": "",
"HostPort": "8081"
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
},
"Mounts": [],
"Config": {
"Hostname": "4c76998dc75a",
"Domainname": "",
"User": "adminer",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"8080/tcp": {}
},
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"NetworkSettings": {
"Bridge": "",
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"Ports": {
"8080/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "8081"
}
]
},
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "172.17.0.5",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"MacAddress": "02:42:ac:11:00:05",
"Networks": {
"bridge": {
"IPAMConfig": null,
"Links": null,
"Aliases": null,
"NetworkID": "8eddd72be1915a2d0f5eb1a4812271debc4e4eca103800ede3511f3f4c56ae98",
"Gateway": "172.17.0.1",
"IPAddress": "172.17.0.5",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:ac:11:00:05",
"DriverOpts": null
}
}
}
}
]
EDIT 2
Check IPv4 forwarding:
root@docker-s-1vcpu-1gb-lon1-01:~# sysctl net.ipv4.conf.all.forwarding
net.ipv4.conf.all.forwarding = 1
Set the FORWARD policy to ACCEPT:
root@docker-s-1vcpu-1gb-lon1-01:~# sudo iptables -P FORWARD ACCEPT
root@docker-s-1vcpu-1gb-lon1-01:~# iptables -S | grep FORWARD
-P FORWARD ACCEPT
-A FORWARD -j DOCKER-USER
-A FORWARD -j DOCKER-ISOLATION-STAGE-1
-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -o docker0 -j DOCKER
-A FORWARD -i docker0 ! -o docker0 -j ACCEPT
-A FORWARD -i docker0 -o docker0 -j ACCEPT
-A FORWARD -o br-ec478ce025ee -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -o br-ec478ce025ee -j DOCKER
-A FORWARD -i br-ec478ce025ee ! -o br-ec478ce025ee -j ACCEPT
-A FORWARD -i br-ec478ce025ee -o br-ec478ce025ee -j ACCEPT
-A FORWARD -j ufw-before-logging-forward
-A FORWARD -j ufw-before-forward
-A FORWARD -j ufw-after-forward
-A FORWARD -j ufw-after-logging-forward
-A FORWARD -j ufw-reject-forward
-A FORWARD -j ufw-track-forward
Use 172.17.0.4 to access the database:
It works!
Running with --net=host gives a warning and doesn't allow access to the db:
If you look at the nmap output you can see the port is reported as filtered. That means one of these:
The routing from the Docker network (typically 172.17.0.0/16) is not set up correctly
Each container is running its own separate network with an overlapping subnet, which prevents packets from routing back correctly
or there is a packet filter (iptables) which prevents packets from reaching the destination correctly.
What I need in addition to debug the issue is the route table (ip route), the packet filter output (iptables -S), and docker inspect from each container.
Update:
These are the potential problems that I see:
Fix the current setup:
You have -P FORWARD DROP in your iptables; this prevents the access. Use sudo iptables -P FORWARD ACCEPT to enable it.
Please check sysctl net.ipv4.conf.all.forwarding; it should be set to 1. If not, edit /etc/sysctl.conf to fix that, and reload the settings with sysctl -p.
OR
Alternatively, you can use the postgresql IP 172.17.0.4 to access the database.
Another option is to set the postgresql network to --net=host; then you should be able to get around the iptables rules.
Alternatively, you can connect your app to the postgresql network by specifying --net=container:<postgresql_container_name> and use localhost to access the database.
You can create a separate network in Docker and run all the containers there, so you are able to reach anything from anywhere without routing through your host IP (see the sketch after this list).
There are probably a few other ways to achieve this, but I leave those to you to figure out :)
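For the separate-network option, a rough sketch using the container names from your docker inspect output (the network name appnet is arbitrary):
# create a user-defined bridge network
docker network create appnet

# attach the existing containers; on a user-defined network they can
# resolve each other by container name via Docker's embedded DNS
docker network connect appnet postgreSQL
docker network connect appnet django
docker network connect appnet PostGresqlAdmin

# from the django container the database is then reachable as postgreSQL:5432
# (only works if ping is installed in that image)
docker exec django ping -c1 postgreSQL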
Update 2:
-P INPUT DROP is also an issue; use this to fix it: sudo iptables -P INPUT ACCEPT
If you choose the first option to fix your current settings, make sure the iptables changes are persisted, otherwise you'll lose them on reboot. Consult your Linux distro manual to figure out how to do that.
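For example, on a Debian/Ubuntu host like this droplet, one common way (assuming the iptables-persistent package is acceptable for your setup):
sudo apt install iptables-persistent
# saves the current rules to /etc/iptables/rules.v4 and rules.v6 so they survive a reboot
sudo netfilter-persistent save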

Packer provisioning by Ansible fails in AWS CodeBuild

My CodeBuild project creates an AMI with Packer, using the Ansible provisioner.
These Packer settings succeed in my local environment and in an Amazon Linux 2 EC2 environment. However, when I use AWS CodeBuild with the aws/codebuild/amazonlinux2-x86_64-standard:1.0 image, it fails.
I already tried the settings remote_tmp = /tmp and remote_tmp = /tmp/.ansible-${USER}/tmp, but neither worked; the error is still:
Authentication or permission failure, did not have permissions on the remote directory
version: 0.2
phases:
  install:
    runtime-versions:
      python: 3.7
  pre_build:
    commands:
      - python --version
      - pip --version
      - curl -qL -o packer.zip https://releases.hashicorp.com/packer/1.4.3/packer_1.4.3_linux_amd64.zip && unzip packer.zip
      - ./packer version
      - pip install --user ansible==2.8.5
      - ansible --version
      - echo 'Validate packer json'
      - ./packer validate packer.json
  build:
    commands:
      - ./packer build -color=false packer.json | tee build.log
{
  "builders": [{
    "type": "amazon-ebs",
    "region": "ap-northeast-1",
    "ami_regions": "ap-northeast-1",
    "source_ami": "ami-0ff21806645c5e492",
    "instance_type": "t2.micro",
    "ssh_username": "ec2-user",
    "ami_name": "packer-quick-start {{timestamp}}",
    "ami_description": "created by packer at {{timestamp}}",
    "ebs_optimized": false,
    "tags": {
      "OS_Version": "Amazon Linux AMI 2018.03",
      "timestamp": "{{timestamp}}",
      "isotime": "{{isotime \"2006-01-02 03:04:05\"}}"
    },
    "disable_stop_instance": false
  }],
  "provisioners": [
    {
      "type": "ansible",
      "extra_arguments": [
        "-vvv"
      ],
      "playbook_file": "ansible/main.yaml"
    }
  ]
}
==> amazon-ebs: Prevalidating AMI Name: packer-quick-start 1569943272
amazon-ebs: Found Image ID: ami-0ff21806645c5e492
==> amazon-ebs: Creating temporary keypair: packer_5d936ee8-541f-5c9a-6955-9672526afc1a
==> amazon-ebs: Creating temporary security group for this instance: packer_5d936ef1-6546-d9d0-60ff-2dc4c011036f
==> amazon-ebs: Authorizing access to port 22 from [0.0.0.0/0] in the temporary security groups...
==> amazon-ebs: Launching a source AWS instance...
==> amazon-ebs: Adding tags to source instance
amazon-ebs: Adding tag: "Name": "Packer Builder"
amazon-ebs: Instance ID: i-04b00db56a8b3b6d0
==> amazon-ebs: Waiting for instance (i-04b00db56a8b3b6d0) to become ready...
==> amazon-ebs: Using ssh communicator to connect: 3.112.61.8
==> amazon-ebs: Waiting for SSH to become available...
==> amazon-ebs: Connected to SSH!
==> amazon-ebs: Provisioning with Ansible...
==> amazon-ebs: Executing Ansible: ansible-playbook --extra-vars packer_build_name=amazon-ebs packer_builder_type=amazon-ebs -o IdentitiesOnly=yes -i /tmp/packer-provisioner-ansible244097143 /codebuild/output/src965785042/src/github.com/repoUsername/reponame/ansible/main.yaml -e ansible_ssh_private_key_file=/tmp/ansible-key242793848 -vvv
amazon-ebs: ansible-playbook 2.8.5
amazon-ebs: config file = /codebuild/output/src965785042/src/github.com/repoUsername/reponame/ansible.cfg
amazon-ebs: configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
amazon-ebs: ansible python module location = /root/.local/lib/python3.7/site-packages/ansible
amazon-ebs: executable location = /root/.local/bin/ansible-playbook
amazon-ebs: python version = 3.7.4 (default, Sep 20 2019, 22:55:10) [GCC 7.3.1 20180303 (Red Hat 7.3.1-5)]
amazon-ebs: Using /codebuild/output/src965785042/src/github.com/repoUsername/reponame/ansible.cfg as config file
amazon-ebs: host_list declined parsing /tmp/packer-provisioner-ansible244097143 as it did not pass it's verify_file() method
amazon-ebs: script declined parsing /tmp/packer-provisioner-ansible244097143 as it did not pass it's verify_file() method
amazon-ebs: auto declined parsing /tmp/packer-provisioner-ansible244097143 as it did not pass it's verify_file() method
amazon-ebs: Parsed /tmp/packer-provisioner-ansible244097143 inventory source with ini plugin
amazon-ebs:
amazon-ebs: PLAYBOOK: main.yaml ************************************************************
amazon-ebs: 1 plays in /codebuild/output/src965785042/src/github.com/repoUsername/reponame/ansible/main.yaml
amazon-ebs:
amazon-ebs: PLAY [all] *********************************************************************
amazon-ebs: META: ran handlers
amazon-ebs:
amazon-ebs: TASK [be sure httpd is installed] **********************************************
amazon-ebs: task path: /codebuild/output/src965785042/src/github.com/repoUsername/reponame/ansible/main.yaml:6
amazon-ebs: <127.0.0.1> ESTABLISH SSH CONNECTION FOR USER: root
amazon-ebs: <127.0.0.1> SSH: EXEC ssh -C -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no -o Port=35595 -o 'IdentityFile="/tmp/ansible-key242793848"' -o KbdInteractiveAuthentication=no -o PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o 'User="root"' -o ConnectTimeout=10 -o ControlPath=/root/.ansible/cp/02aaab1733 127.0.0.1 '/bin/sh -c '"'"'echo ~root && sleep 0'"'"''
amazon-ebs: <127.0.0.1> (0, b'/root\n', b"Warning: Permanently added '[127.0.0.1]:35595' (RSA) to the list of known hosts.\r\n")
amazon-ebs: <127.0.0.1> ESTABLISH SSH CONNECTION FOR USER: root
amazon-ebs: <127.0.0.1> SSH: EXEC ssh -C -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no -o Port=35595 -o 'IdentityFile="/tmp/ansible-key242793848"' -o KbdInteractiveAuthentication=no -o PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o 'User="root"' -o ConnectTimeout=10 -o ControlPath=/root/.ansible/cp/02aaab1733 127.0.0.1 '/bin/sh -c '"'"'( umask 77 && mkdir -p "` echo /root/.ansible/tmp/ansible-tmp-1569943320.4544108-49329379039882 `" && echo ansible-tmp-1569943320.4544108-49329379039882="` echo /root/.ansible/tmp/ansible-tmp-1569943320.4544108-49329379039882 `" ) && sleep 0'"'"''
amazon-ebs: <127.0.0.1> (1, b'', b'mkdir: cannot create directory \xe2\x80\x98/root\xe2\x80\x99: Permission denied\n')
amazon-ebs: <127.0.0.1> Failed to connect to the host via ssh: mkdir: cannot create directory ‘/root’: Permission denied
amazon-ebs: fatal: [default]: UNREACHABLE! => {
amazon-ebs: "changed": false,
amazon-ebs: "msg": "Authentication or permission failure. In some cases, you may have been able to authenticate and did not have permissions on the target directory. Consider changing the remote tmp path in ansible.cfg to a path rooted in \"/tmp\". Failed command was: ( umask 77 && mkdir -p \"` echo /root/.ansible/tmp/ansible-tmp-1569943320.4544108-49329379039882 `\" && echo ansible-tmp-1569943320.4544108-49329379039882=\"` echo /root/.ansible/tmp/ansible-tmp-1569943320.4544108-49329379039882 `\" ), exited with result 1",
amazon-ebs: "unreachable": true
amazon-ebs: }
amazon-ebs:
amazon-ebs: PLAY RECAP *********************************************************************
amazon-ebs: default : ok=0 changed=0 unreachable=1 failed=0 skipped=0 rescued=0 ignored=0
amazon-ebs:
==> amazon-ebs: Terminating the source AWS instance...
==> amazon-ebs: Cleaning up any extra volumes...
==> amazon-ebs: No volumes to clean up, skipping
==> amazon-ebs: Deleting temporary security group...
==> amazon-ebs: Deleting temporary keypair...
I know it fails because it tried to mkdir /root and got Permission denied.
But I don't know why it tried to mkdir /root. How can I change this behavior?
I solved it, and it was a super simple cause.
Because AWS CodeBuild builds as the root user, Ansible makes the connection as the root user. I just wrote it like this and solved it:
"provisioners": [
{
"type" : "ansible",
"user": "ec2-user",
"playbook_file" : "ansible/main.yaml"
}
]
My Ansible file is simple, for testing:
---
- hosts: all
  become: yes
  gather_facts: no
  tasks:
    - name: be sure httpd is installed
      yum: name=httpd state=installed
    - name: be sure httpd is running and enabled
      service: name=httpd state=started enabled=yes

How to install Supervisor in Elastic Beanstalk through .ebextensions?

May I know how I can install Supervisor into Elastic Beanstalk through .ebextensions? And how can I execute a Supervisor command through .ebextensions?
supervisorctl and supervisord are already present on Elastic Beanstalk instances in the /usr/local/bin directory. You can use ebextensions to load a Supervisor config file and run Supervisor in daemon mode.
In your .ebextensions folder create a file 002_supervisor.config.
This file does 3 things:
Creates a supervisord.conf file in /usr/local/etc on your Elastic Beanstalk instance.
Creates an init.d script so that supervisord will be run as a daemon at system start.
Runs restart on supervisord when the application is deployed.
files:
  /usr/local/etc/supervisord.conf:
    mode: "000755"
    owner: root
    group: root
    content: |
      [unix_http_server]
      file=/tmp/supervisor.sock ; (the path to the socket file)

      [supervisord]
      logfile=/tmp/supervisord.log ; (main log file;default $CWD/supervisord.log)
      logfile_maxbytes=50MB ; (max main logfile bytes b4 rotation;default 50MB)
      logfile_backups=10 ; (num of main logfile rotation backups;default 10)
      loglevel=info ; (log level;default info; others: debug,warn,trace)
      pidfile=/tmp/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
      nodaemon=false ; (start in foreground if true;default false)
      minfds=1024 ; (min. avail startup file descriptors;default 1024)
      minprocs=200 ; (min. avail process descriptors;default 200)

      [rpcinterface:supervisor]
      supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface

      [supervisorctl]
      serverurl=unix:///tmp/supervisor.sock ; use a unix:// URL for a unix socket

      [include]
      files = /usr/local/etc/*.conf

      [inet_http_server]
      port = 127.0.0.1:9001

  /etc/init.d/supervisord:
    mode: "000755"
    owner: root
    group: root
    content: |
      #!/bin/bash

      # Source function library
      . /etc/rc.d/init.d/functions

      # Source system settings
      if [ -f /etc/sysconfig/supervisord ]; then
          . /etc/sysconfig/supervisord
      fi

      # Path to the supervisorctl script, server binary,
      # and short-form for messages.
      supervisorctl=/usr/local/bin/supervisorctl
      supervisord=${SUPERVISORD-/usr/local/bin/supervisord}
      prog=supervisord

      pidfile=${PIDFILE-/tmp/supervisord.pid}
      lockfile=${LOCKFILE-/var/lock/subsys/supervisord}
      STOP_TIMEOUT=${STOP_TIMEOUT-60}
      OPTIONS="${OPTIONS--c /usr/local/etc/supervisord.conf}"
      RETVAL=0

      start() {
          echo -n $"Starting $prog: "
          daemon --pidfile=${pidfile} $supervisord $OPTIONS
          RETVAL=$?
          echo
          if [ $RETVAL -eq 0 ]; then
              touch ${lockfile}
              $supervisorctl $OPTIONS status
          fi
          return $RETVAL
      }

      stop() {
          echo -n $"Stopping $prog: "
          killproc -p ${pidfile} -d ${STOP_TIMEOUT} $supervisord
          RETVAL=$?
          echo
          [ $RETVAL -eq 0 ] && rm -rf ${lockfile} ${pidfile}
      }

      reload() {
          echo -n $"Reloading $prog: "
          LSB=1 killproc -p $pidfile $supervisord -HUP
          RETVAL=$?
          echo
          if [ $RETVAL -eq 7 ]; then
              failure $"$prog reload"
          else
              $supervisorctl $OPTIONS status
          fi
      }

      restart() {
          stop
          start
      }

      case "$1" in
          start)
              start
              ;;
          stop)
              stop
              ;;
          status)
              status -p ${pidfile} $supervisord
              RETVAL=$?
              [ $RETVAL -eq 0 ] && $supervisorctl $OPTIONS status
              ;;
          restart)
              restart
              ;;
          condrestart|try-restart)
              if status -p ${pidfile} $supervisord >&/dev/null; then
                  stop
                  start
              fi
              ;;
          force-reload|reload)
              reload
              ;;
          *)
              echo $"Usage: $prog {start|stop|restart|condrestart|try-restart|force-reload|reload}"
              RETVAL=2
      esac

      exit $RETVAL

commands:
  01_start_supervisor:
    command: '/etc/init.d/supervisord restart'
    leader_only: true
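To have Supervisor actually run something (the second half of the question), one option is to drop an extra program config into /usr/local/etc, which the [include] section above picks up, and have supervisord re-read it on deploy. The program name, command, and log paths below are only placeholders for your own process:
files:
  /usr/local/etc/myapp.conf:
    mode: "000644"
    owner: root
    group: root
    content: |
      [program:myapp]
      ; placeholder command - point this at your real long-running process
      command=/usr/bin/python /var/app/current/run_worker.py
      directory=/var/app/current
      autostart=true
      autorestart=true
      stdout_logfile=/var/log/myapp.stdout.log
      stderr_logfile=/var/log/myapp.stderr.log

commands:
  02_reload_supervisor:
    command: '/usr/local/bin/supervisorctl -c /usr/local/etc/supervisord.conf reread && /usr/local/bin/supervisorctl -c /usr/local/etc/supervisord.conf update'
    leader_only: true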
Hope this helps!

vagrant up - mount error on mavericks

I just cannot get my vagrant box up and running. This vagrant error makes me sick:
$ vagrant up
...
==> default: Mounting shared folders...
default: /vagrant => /path/to/folder/of/Vagrantfile
Failed to mount folders in Linux guest. This is usually because
the "vboxsf" file system is not available. Please verify that
the guest additions are properly installed in the guest and
can work properly. The command attempted was:
mount -t vboxsf -o uid=`id -u vagrant`,gid=`getent group vagrant | cut -d: -f3` /vagrant /vagrant
mount -t vboxsf -o uid=`id -u vagrant`,gid=`id -g vagrant` /vagrant /vagrant
...
$ vagrant reload
==> default: Attempting graceful shutdown of VM...
==> default: Clearing any previously set forwarded ports...
==> default: Clearing any previously set network interfaces...
There was an error while executing `VBoxManage`, a CLI used by Vagrant
for controlling VirtualBox. The command and stderr is shown below.
Command: ["hostonlyif", "create"]
Stderr: 0%...
Progress state: NS_ERROR_FAILURE
VBoxManage: error: Failed to create the host-only adapter
VBoxManage: error: VBoxNetAdpCtl: Error while adding new interface: failed to open /dev/vboxnetctl: No such file or directory
VBoxManage: error: Details: code NS_ERROR_FAILURE (0x80004005), component HostNetworkInterface, interface IHostNetworkInterface
VBoxManage: error: Context: "int handleCreate(HandlerArg*, int, int*)" at line 66 of file VBoxManageHostonly.cpp
Things I've tried:
sudo launchctl load /Library/LaunchDaemons/org.virtualbox.startup.plist
destroying the box and trying again
restarting
Does anyone else have this problem and have a working solution?
vagrant version is 1.5.4
virtualbox version is 4.3.10 r93012
Content of my Vagrantfile:
Vagrant.configure("2") do |config|
  config.vm.box = "precise64"
  config.vm.box_url = "http://files.vagrantup.com/precise64.box"

  config.vm.network :private_network, ip: "192.168.56.101" # eth1
  # find your correct interface name with: VBoxManage list bridgedifs
  config.vm.network :public_network, bridge: "en1: Monitor-Ethernet" # eth2
  config.ssh.forward_agent = true

  config.vm.provider :virtualbox do |v|
    v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
    v.customize ["modifyvm", :id, "--memory", 2048]
    v.customize ["modifyvm", :id, "--ioapic", "on"]
    v.customize ["modifyvm", :id, "--cpus", "2"]
    v.customize ["modifyvm", :id, "--name", "fsc_box"]
  end

  nfs_setting = RUBY_PLATFORM =~ /darwin/ || RUBY_PLATFORM =~ /linux/
  config.vm.synced_folder "../../", "/var/www/symfony/current/", id: "symfony", :nfs => nfs_setting

  config.vm.provision :shell, :inline =>
    "if [[ ! -f /apt-get-run ]]; then sudo apt-get update && sudo touch /apt-get-run; fi"

  config.vm.provision :puppet do |puppet|
    puppet.manifests_path = "site/manifests"
    puppet.module_path = "."
    puppet.options = "--verbose --debug"
  end
end
It seems I fixed it by re-installing VirtualBox.
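If re-installing alone doesn't clear the vboxsf part of the error for someone else, another common approach is the vagrant-vbguest plugin, which rebuilds the guest additions inside the box so they match the host's VirtualBox version:
# install the plugin on the host, then restart the box so it can rebuild the additions
vagrant plugin install vagrant-vbguest
vagrant reload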