Wercker gh-pages deploy step failing - github-pages

So I am using Wercker to auto-deploy my Hugo blog to GitHub Pages. The Build step in Wercker works fine, but toward the end of the Deploy step I get the error "gh-pages failed pushing to github pages".
I am specifically using the lukevivier/gh-pages step for deployment. My YAML is as follows:
box: debian
build:
  steps:
    - arjen/hugo-build:
        version: "0.14"
        theme: hugo-agency-theme
        flags: --buildDrafts=true
deploy:
  steps:
    - install-packages:
        packages: git ssh-client
    - lukevivier/gh-pages:
        token: $GIT_TOKEN
        basedir: public
        domain: johnnyjanzen.github.io
The error log is as follows:
export WERCKER_STEP_ROOT="/pipeline/gh-pages-37f7864e-6ccb-403c-88bd-004af3de71ac"
export WERCKER_STEP_ID="gh-pages-37f7864e-6ccb-403c-88bd-004af3de71ac"
export WERCKER_STEP_OWNER="lukevivier"
export WERCKER_STEP_NAME="gh-pages"
export WERCKER_REPORT_NUMBERS_FILE="/report/gh-pages-37f7864e-6ccb-403c-88bd-004af3de71ac/numbers.ini"
export WERCKER_REPORT_MESSAGE_FILE="/report/gh-pages-37f7864e-6ccb-403c-88bd-004af3de71ac/message.txt"
export WERCKER_REPORT_ARTIFACTS_DIR="/report/gh-pages-37f7864e-6ccb-403c-88bd-004af3de71ac/artifacts"
export WERCKER_GH_PAGES_TOKEN="$GIT_TOKEN"
export WERCKER_GH_PAGES_BASEDIR="public"
export WERCKER_GH_PAGES_DOMAIN="johnnyjanzen.github.io"
source "/pipeline/gh-pages-37f7864e-6ccb-403c-88bd-004af3de71ac/run.sh" < /dev/null
using github repo "johnnyjanzen/mmucan"
Initialized empty Git repository in /pipeline/source/public/.git/
[master (root-commit) bcacf48] deploy from johnnyjanzen
104 files changed, 19649 insertions(+)
create mode 100644 .DS_Store
create mode 100644 404.html
create mode 100644 CNAME
create mode 100644 css/agency.css
create mode 100644 css/bootstrap.css
create mode 100644 css/bootstrap.min.css
create mode 100755 css/dependenciesBAD.css
create mode 100755 css/styleddBAD.css
create mode 100644 font-awesome/css/font-awesome.css
create mode 100644 font-awesome/css/font-awesome.min.css
create mode 100644 font-awesome/fonts/FontAwesome.otf
create mode 100644 font-awesome/fonts/fontawesome-webfont.eot
create mode 100644 font-awesome/fonts/fontawesome-webfont.svg
create mode 100644 font-awesome/fonts/fontawesome-webfont.ttf
create mode 100644 font-awesome/fonts/fontawesome-webfont.woff
create mode 100644 font-awesome/less/bordered-pulled.less
create mode 100644 font-awesome/less/core.less
create mode 100644 font-awesome/less/fixed-width.less
create mode 100644 font-awesome/less/font-awesome.less
create mode 100644 font-awesome/less/icons.less
create mode 100644 font-awesome/less/larger.less
create mode 100644 font-awesome/less/list.less
create mode 100644 font-awesome/less/mixins.less
create mode 100644 font-awesome/less/path.less
create mode 100644 font-awesome/less/rotated-flipped.less
create mode 100644 font-awesome/less/spinning.less
create mode 100644 font-awesome/less/stacked.less
create mode 100644 font-awesome/less/variables.less
create mode 100644 font-awesome/scss/_bordered-pulled.scss
create mode 100644 font-awesome/scss/_core.scss
create mode 100644 font-awesome/scss/_fixed-width.scss
create mode 100644 font-awesome/scss/_icons.scss
create mode 100644 font-awesome/scss/_larger.scss
create mode 100644 font-awesome/scss/_list.scss
create mode 100644 font-awesome/scss/_mixins.scss
create mode 100644 font-awesome/scss/_path.scss
create mode 100644 font-awesome/scss/_rotated-flipped.scss
create mode 100644 font-awesome/scss/_spinning.scss
create mode 100644 font-awesome/scss/_stacked.scss
create mode 100644 font-awesome/scss/_variables.scss
create mode 100644 font-awesome/scss/font-awesome.scss
create mode 100644 fonts/glyphicons-halflings-regular.eot
create mode 100644 fonts/glyphicons-halflings-regular.svg
create mode 100644 fonts/glyphicons-halflings-regular.ttf
create mode 100644 fonts/glyphicons-halflings-regular.woff
create mode 100644 fonts/glyphicons-halflings-regular.woff2
create mode 100644 img/about/1.jpg
create mode 100644 img/about/2.jpg
create mode 100644 img/about/3.jpg
create mode 100644 img/about/4.jpg
create mode 100644 img/header-bg.jpg
create mode 100644 img/logo_1_transparent.png
create mode 100644 img/logos/aetuts.jpg
create mode 100644 img/logos/creative-market.jpg
create mode 100644 img/logos/designmodo.jpg
create mode 100644 img/logos/envato.jpg
create mode 100644 img/logos/microlancer.jpg
create mode 100644 img/logos/themeforest.jpg
create mode 100644 img/logos/wordpress.jpg
create mode 100644 img/map-image.png
create mode 100644 img/portfolio/dreams-preview.png
create mode 100644 img/portfolio/dreams.png
create mode 100644 img/portfolio/escape-preview.png
create mode 100644 img/portfolio/escape.png
create mode 100644 img/portfolio/golden-preview.png
create mode 100644 img/portfolio/golden.png
create mode 100644 img/portfolio/roundicons-preview.png
create mode 100644 img/portfolio/roundicons.png
create mode 100644 img/portfolio/startup-framework-preview.png
create mode 100644 img/portfolio/startup-framework.png
create mode 100644 img/portfolio/treehouse-preview.png
create mode 100644 img/portfolio/treehouse.png
create mode 100644 img/team/1.jpg
create mode 100644 img/team/2.jpg
create mode 100644 img/team/3.jpg
create mode 100644 index.html
create mode 100644 index.xml
create mode 100644 js/agency.js
create mode 100644 js/bootstrap.js
create mode 100644 js/bootstrap.min.js
create mode 100644 js/cbpAnimatedHeader.js
create mode 100644 js/cbpAnimatedHeader.min.js
create mode 100644 js/classie.js
create mode 100644 js/contact_me.js
create mode 100755 js/dependencies.js
create mode 100644 js/jqBootstrapValidation.js
create mode 100644 js/jquery.js
create mode 100755 js/script.js
create mode 100644 page/1/index.html
create mode 100644 page/2/index.html
create mode 100644 post/2016-02-11-your-filename/index.html
create mode 100644 post/index.html
create mode 100644 post/index.xml
create mode 100644 post/mynew copy 2/index.html
create mode 100644 post/mynew copy 3/index.html
create mode 100644 post/mynew copy 4/index.html
create mode 100644 post/mynew copy 5/index.html
create mode 100644 post/mynew copy 6/index.html
create mode 100644 post/mynew copy 7/index.html
create mode 100644 post/mynew copy/index.html
create mode 100644 post/mynew/index.html
create mode 100644 post/mynew2/index.html
create mode 100644 post/page/1/index.html
create mode 100644 sitemap.xml
remote: Permission to johnnyjanzen/mmucan.git denied to johnnyjanzen.
fatal: unable to access 'https://**my github personal access token**#github.com/johnnyjanzen/mmucan.git/': The requested URL returned error: 403
/pipeline/gh-pages-37f7864e-6ccb-403c-88bd-004af3de71ac/run.sh: line 60: warning: command not found
failed: failed pushing to github pages
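Judging by the log above, the step initialises a git repository inside the configured basedir (public), commits the generated site, and pushes it over HTTPS using the token. Roughly, as a simplified sketch (not the step's actual source; the target branch and URL format are assumptions):
cd public                                      # the configured basedir
git init
git add .
git commit -m "deploy from johnnyjanzen"
# authenticate the push with the personal access token (branch name assumed)
git push --force "https://$GIT_TOKEN@github.com/johnnyjanzen/mmucan.git" master:gh-pages
A 403 on that push therefore points at the token's permissions rather than at the wercker.yml itself.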
I have created a personal access token in my GitHub account and placed it in Wercker as an environment variable.
The first time I tried this, my main /mmucan git repository contained only one branch (master). I read online somewhere that Wercker depends on there being a gh-pages branch, so I created one (identical to master), but the exact same error log was returned.
Any help would be greatly appreciated, cheers!

I answered my own question. I misread the "personal access token" page on GitHub and thought the checkboxes were there to LIMIT (i.e. disable) access for the token, so I left all the boxes unticked.
The correct thing to do is to tick all the boxes referring to repository access when you create the personal access token.
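If you want to sanity-check a token before re-running the deploy, the GitHub API reports the scopes granted to a classic token in the X-OAuth-Scopes response header; a quick sketch, assuming the token is exported locally as GIT_TOKEN:
# "repo" (or at least "public_repo" for public repositories) should appear in the output
curl -s -o /dev/null -D - -H "Authorization: token $GIT_TOKEN" https://api.github.com/user | grep -i x-oauth-scopes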

Related

Why does DrupalVM using VirtualBox on macOS Ventura not work with "Required ruby-2.7.0 is not installed"?

I have come to a grinding halt setting up a DrupalVM (Ubuntu 18.04) on macOS Ventura.
I have updated my local tools to git 2.39.0, Vagrant 2.3.4, VirtualBox 7.0.4 and Ansible 7.1.0.
I have also added Ruby 2.7.0 via rbenv to the VM.
The startup runs and I can vagrant ssh into the VM.
The startup says that the folders are synced, but when I cd into one of the sites that I have set up, it gives me:
Required ruby-2.7.0 is not installed.
To install do: 'rvm install "ruby-2.7.0"'
even though Ruby is already installed:
ruby -v
ruby 2.7.0p0 (2019-12-25 revision 647ee6f091) [x86_64-linux]
And then the VM hangs. I can use another window to ssh in, and I can go to other directories beforehand.
If I remove my .ruby-version file, it hangs without any response.
Any thoughts?
I have updated the dvm config files to get it to provision, but cannot find
These are my changes from master to get it running:
index dae1105..00ac634 100644
--- a/default.config.yml
+++ b/default.config.yml
@@ -1,4 +1,3 @@
----
# Available `vagrant_box` values include:
# - geerlingguy/drupal-vm (pre-provisioned, based on Ubuntu 18.04)
# - geerlingguy/centos8
diff --git a/provisioning/roles/geerlingguy.firewall/tasks/disable-other-firewalls.yml b/provisioning/roles/geerlingguy.firewall/tasks/disable-other-firewalls.yml
index 509196a..5e3c2b4 100644
--- a/provisioning/roles/geerlingguy.firewall/tasks/disable-other-firewalls.yml
+++ b/provisioning/roles/geerlingguy.firewall/tasks/disable-other-firewalls.yml
@@ -26,7 +26,7 @@
args:
warn: false
register: ufw_installed
- ignore_errors: true
+ failed_when: false
changed_when: false
when:
- ansible_distribution == "Ubuntu"
@@ -41,7 +41,6 @@
when:
- ansible_distribution == "Ubuntu"
- firewall_disable_ufw
- - ufw_installed.rc == 0
- name: Check if ufw package is installed (on Archlinux).
command: pacman -Q ufw
diff --git a/provisioning/roles/geerlingguy.mysql/tasks/configure.yml b/provisioning/roles/geerlingguy.mysql/tasks/configure.yml
index b1f004f..fed54ba 100644
--- a/provisioning/roles/geerlingguy.mysql/tasks/configure.yml
+++ b/provisioning/roles/geerlingguy.mysql/tasks/configure.yml
@@ -39,7 +39,6 @@
command: "touch {{ mysql_slow_query_log_file }}"
args:
creates: "{{ mysql_slow_query_log_file }}"
- warn: false
when: mysql_slow_query_log_enabled
- name: Create datadir if it does not exist
diff --git a/provisioning/tasks/apparmor.yml b/provisioning/tasks/apparmor.yml
index c9b6dd0..fe1146c 100644
--- a/provisioning/tasks/apparmor.yml
+++ b/provisioning/tasks/apparmor.yml
@@ -15,7 +15,6 @@
register: mysql_apparmor
when:
- mysql_slow_query_log_enabled
- - apparmor_status.rc == 0
- name: Restart the AppArmor if necessary.
service: name=apparmor state=restarted
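A hedged diagnostic that may help narrow this down: the "To install do: 'rvm install ...'" hint is rvm's wording, and rvm works by wrapping cd in a shell function that reads .ruby-version, whereas the Ruby above was installed with rbenv. From a shell inside the VM, something like this shows which tool is actually intercepting the directory change:
type cd                              # if cd is a shell function, an rvm hook is wrapping it
command -v rvm                       # is rvm present at all?
command -v rbenv && rbenv versions   # the rubies rbenv knows about
cat .ruby-version                    # the version the hook is trying to resolve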

How can I connect to docker db from local django?

How can I connect to docker db from local django?
version: '3'
services:
  redis-1:
    container_name: redis1
    build: ./docker/redis
    environment:
      X_REDIS_PORT: 7001
    networks:
      redisnet:
        ipv4_address: 10.0.0.11
    ports:
      - 7001:7001
  redis-2:
    container_name: redis2
    build: ./docker/redis
    environment:
      X_REDIS_PORT: 7002
    networks:
      redisnet:
        ipv4_address: 10.0.0.12
    ports:
      - 7002:7002
  redis-3:
    container_name: redis3
    build: ./docker/redis
    environment:
      X_REDIS_PORT: 7003
    networks:
      redisnet:
        ipv4_address: 10.0.0.13
    ports:
      - 7003:7003
  redis-4:
    container_name: redis4
    build: ./docker/redis
    environment:
      X_REDIS_PORT: 7004
    networks:
      redisnet:
        ipv4_address: 10.0.0.14
    ports:
      - 7004:7004
  redis-5:
    container_name: redis5
    build: ./docker/redis
    environment:
      X_REDIS_PORT: 7005
    networks:
      redisnet:
        ipv4_address: 10.0.0.15
    ports:
      - 7005:7005
  redis-6:
    container_name: redis6
    build: ./docker/redis
    environment:
      X_REDIS_PORT: 7006
    networks:
      redisnet:
        ipv4_address: 10.0.0.16
    ports:
      - 7006:7006
  redis-cluster:
    container_name: redis-cluster
    image: redis:latest
    command: redis-cli -p 7001 --cluster create 10.0.0.11:7001 10.0.0.12:7002 10.0.0.13:7003 10.0.0.14:7004 10.0.0.15:7005 10.0.0.16:7006 --cluster-replicas 1 --cluster-yes
    depends_on:
      - redis-1
      - redis-2
      - redis-3
      - redis-4
      - redis-5
      - redis-6
    networks:
      redisnet:
        ipv4_address: 10.0.0.2
  predixy:
    container_name: predixy
    build: ./docker/predixy
    depends_on:
      - redis-1
      - redis-2
      - redis-3
      - redis-4
      - redis-5
      - redis-6
    ports:
      - 7617:7617
    volumes:
      - ./docker/predixy/conf:/etc/predixy/conf
    networks:
      redisnet:
        ipv4_address: 10.0.0.3
networks:
  redisnet:
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: 10.0.0.0/16
This is my docker-compose.yml file, and I would like to connect Django to the predixy (cluster proxy) exposed on port 7617, as below.
# settings.py
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "redis://0.0.0.0:7617/",
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        },
    }
}
However, Django does not seem to be able to connect to predixy:
failed to connect to Redis: Connection refused (os error 111)
For reference, here is the predixy config file as well.
# docker/predixy/predixy.conf
################################### GENERAL ####################################
## Predixy configuration file example
## Specify a name for this predixy service
## redis command INFO can get this
Name PredixyExample
## Specify listen address, support IPV4, IPV6, Unix socket
## Examples:
# Bind 127.0.0.1:7617
# Bind 0.0.0.0:7617
# Bind /tmp/predixy
## Default is 0.0.0.0:7617
Bind 10.0.0.3:7617
## Worker threads
WorkerThreads 4
## Memory limit, 0 means unlimited
## Examples:
# MaxMemory 100M
# MaxMemory 1G
# MaxMemory 0
## MaxMemory can change online by CONFIG SET MaxMemory xxx
## Default is 0
MaxMemory 0
## Close the connection after a client is idle for N seconds (0 to disable)
## ClientTimeout can change online by CONFIG SET ClientTimeout N
## Default is 0
ClientTimeout 300
## IO buffer size
## Default is 4096
# BufSize 4096
################################### LOG ########################################
## Log file path
## Unspecify will log to stdout
## Default is Unspecified
Log ./predixy.log
## LogRotate support
## 1d rotate log every day
## nh rotate log every n hours 1 <= n <= 24
## nm rotate log every n minutes 1 <= n <= 1440
## nG rotate log evenry nG bytes
## nM rotate log evenry nM bytes
## time rotate and size rotate can combine eg 1h 2G, means 1h or 2G roate a time
## Examples:
# LogRotate 1d 2G
# LogRotate 1d
## Default is disable LogRotate
## In multi-threads, worker thread log need lock,
## AllowMissLog can reduce lock time for improve performance
## AllowMissLog can change online by CONFIG SET AllowMissLog true|false
## Default is true
# AllowMissLog false
## LogLevelSample, output a log every N
## all level sample can change online by CONFIG SET LogXXXSample N
LogVerbSample 0
LogDebugSample 0
LogInfoSample 10000
LogNoticeSample 1
LogWarnSample 1
LogErrorSample 1
################################### AUTHORITY ##################################
Include auth.conf
################################### SERVERS ####################################
Include cluster.conf
# Include sentinel.conf
# Include try.conf
################################### DATACENTER #################################
## LocalDC specify current machine dc
# LocalDC bj
## see dc.conf
# Include dc.conf
################################### COMMAND ####################################
## Custom command define, see command.conf
#Include command.conf
################################### LATENCY ####################################
## Latency monitor define, see latency.conf
Include latency.conf
## redis cluster server pool define
# cluster.conf
ClusterServerPool {
    MasterReadPriority 60
    StaticSlaveReadPriority 50
    DynamicSlaveReadPriority 50
    RefreshInterval 1
    ServerTimeout 1
    ServerFailureLimit 10
    ServerRetryTimeout 1
    Servers {
        + 10.0.0.11:7001
        + 10.0.0.12:7002
        + 10.0.0.13:7003
        + 10.0.0.14:7004
        + 10.0.0.15:7005
        + 10.0.0.16:7006
    }
}
How can I solve this problem? I have been struggling with it.
What I want is to be able to use the Docker CLI and connect with redis-cli -h 10.0.0.3 -p 7617, so that I can see the data I put in from my local Django app.
Replace 0.0.0.0 with the predixy service name, as below:
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "redis://predixy:7617/",
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        },
    }
}
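To verify the proxy is reachable before pointing Django at it, a couple of hedged checks (assuming redis-cli is available in the redis images and, for the host-side check, installed locally):
# From inside the compose network, the service name resolves:
docker exec -it redis1 redis-cli -h predixy -p 7617 ping
# From the host, through the published 7617:7617 port mapping:
redis-cli -h 127.0.0.1 -p 7617 ping
Note that the predixy hostname only resolves for containers attached to the same redisnet network, so redis://predixy:7617/ assumes the Django app also runs as a compose service on that network; a Django process running directly on the host would instead use the published port, e.g. redis://127.0.0.1:7617/.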

Why does Django recreate the DB tables on each docker-container restart?

I am running Django with PostgreSQL in a docker-compose setup for development. Each time I restart the application container, the database is empty, even though I do neither restart the DBMS container nor do I drop the DBMS's data volume. It seems that Django is dropping all tables upon restart. Why?
My setup closely follows the description here. That is, my compose file looks as follows (simplified):
version: '3.8'
services:
  db:
    image: postgres
    environment:
      - POSTGRES_DB=db_dev
      - POSTGRES_USER=dev
      - POSTGRES_PASSWORD=postgres
    volumes:
      - type: volume
        source: app-data
        target: /var/lib/postgresql/data
  app:
    build: .
    command: python manage.py runserver 0.0.0.0:8888
    container_name: app
    environment:
      - DATABASE_URL
      - PYTHONDONTWRITEBYTECODE=1
      - PYTHONUNBUFFERED=1
    volumes:
      # Mount the local source code folder for quick iterations.
      # See: https://www.docker.com/blog/containerized-python-development-part-3/
      - type: bind
        source: .
        target: /code
    ports:
      - target: 8888
        published: 8888
    depends_on:
      - db
    env_file:
      - ./dev.env
volumes:
  app-data:
    external: true
The Django application is started by means of an entrypoint.sh:
#! /bin/sh
if [ "$DATABASE" = "postgresql" ]
then
    echo "Waiting for postgres..."
    while ! nc -z $SQL_HOST $SQL_PORT; do
        sleep 0.1
    done
    echo "PostgreSQL started"
fi
doMigrate=${DB_MIGRATE:-false}
if [ "$doMigrate" = true ]
then
    python manage.py flush --no-input
    python manage.py migrate
fi
exec "$@"
In the development setup, I set DB_MIGRATE=true and DEBUG=1.
The Django flush command removes all data from the database, as explained in the documentation.
Hence, to solve my problem above, I only need to remove the line
python manage.py flush --no-input
from my entrypoint.sh script.
Explanation: I incorrectly assumed that flush would commit any pending transactions that might still be open from other applications potentially using the DB. This is not the case; flush simply removes all data.
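For reference, a minimal sketch of the adjusted entrypoint.sh, keeping the existing DB_MIGRATE convention; the separate DB_FLUSH opt-in flag is an assumption, not part of the original setup:
#! /bin/sh
if [ "$DATABASE" = "postgresql" ]
then
    echo "Waiting for postgres..."
    while ! nc -z "$SQL_HOST" "$SQL_PORT"; do
        sleep 0.1
    done
    echo "PostgreSQL started"
fi

# Apply migrations only; never wipe data automatically on container start.
if [ "${DB_MIGRATE:-false}" = true ]
then
    python manage.py migrate
fi

# Flushing is destructive, so gate it behind its own (hypothetical) flag.
if [ "${DB_FLUSH:-false}" = true ]
then
    python manage.py flush --no-input
fi

exec "$@"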

How to pass the correct project path to bitbucket pipeline?

I want to deploy an AWS Lambda .NET Core project using a Bitbucket pipeline.
I have created bitbucket-pipelines.yml as shown below, but when the build runs I get this error:
MSBUILD : error MSB1003: Specify a project or solution file. The current working directory does not contain a project or solution file.
The file contents:
image: microsoft/dotnet:sdk
pipelines:
  default:
    - step:
        caches:
          - dotnetcore
        script: # Modify the commands below to build your repository.
          - export PROJECT_NAME=TestAWS/AWSLambda1/AWSLambda1.sln
          - dotnet restore
          - dotnet build $PROJECT_NAME
          - pipe: atlassian/aws-lambda-deploy:0.2.1
            variables:
              AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID}
              AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY}
              AWS_DEFAULT_REGION: 'us-east-1'
              FUNCTION_NAME: 'my-lambda-function'
              COMMAND: 'update'
              ZIP_FILE: 'code.zip'
The project structure is like this (screenshot not reproduced here).
The problem is here:
PROJECT_NAME=TestAWS/AWSLambda1/AWSLambda1.sln
This is the incorrect path. Bitbucket Pipelines will use a special path in the Docker image, something like /opt/atlassian/pipelines/agent/build/YOUR_PROJECT, to do a Git clone of your project.
You can see this when you click on the "Build Setup" step in the Pipelines web console:
Cloning into '/opt/atlassian/pipelines/agent/build'...
You can use a pre-defined environment variable to retrieve this path: $BITBUCKET_CLONE_DIR, as described here: https://support.atlassian.com/bitbucket-cloud/docs/variables-in-pipelines/
Consider something like this in your yml build script:
script:
  - echo $BITBUCKET_CLONE_DIR # Debug: Print the $BITBUCKET_CLONE_DIR
  - pwd # Debug: Print the current working directory
  - find "$(pwd -P)" -name AWSLambda1.sln # Debug: Show the full file path of AWSLambda1.sln
  - export PROJECT_NAME="$BITBUCKET_CLONE_DIR/AWSLambda1.sln"
  - echo $PROJECT_NAME
  - if [ -f "$PROJECT_NAME" ]; then echo "File exists" ; fi
  # Try this if the file path is not as expected
  - export PROJECT_NAME="$BITBUCKET_CLONE_DIR/AWSLambda1/AWSLambda1.sln"
  - echo $PROJECT_NAME
  - if [ -f "$PROJECT_NAME" ]; then echo "File exists" ; fi
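Once the find output confirms where the solution actually lives, the fix is simply to build from that path. A hedged sketch of the resulting script lines (the AWSLambda1 subfolder is an assumption carried over from the original PROJECT_NAME; restore is given the path too, since the bare dotnet restore has the same working-directory problem):
  - export PROJECT_NAME="$BITBUCKET_CLONE_DIR/AWSLambda1/AWSLambda1.sln"
  - dotnet restore "$PROJECT_NAME"
  - dotnet build "$PROJECT_NAME" -c Release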

Ansible failed to run shell module on sbin folder

I ran an Ansible playbook on a specific host.
When I execute, for example, the iptables -L command from Ansible, I get this error:
changed: [host] => {"changed": true, "cmd": "iptables -L", "delta": "0:00:00.018402", "end": "2020-04-26 09:33:11.274857", "failed_when_result": false, "msg": "non-zero return code", "rc": 127, "start": "2020-04-26 09:33:11.256455", "stderr": "/bin/sh: iptables: command not found", "stderr_lines": ["/bin/sh: iptables: command not found"], "stdout": "", "stdout_lines": []}
Example playbook:
---
- hosts: all
  gather_facts: no
  tasks:
    - name: ls
      shell: tuned -v
      args:
        executable: /usr/sbin
    - name: iptables flush filter
      iptables:
        chain: "{{ item }}"
        flush: yes
      with_items: [ 'INPUT', 'FORWARD', 'OUTPUT' ]
    - name: Get iptables rules | No resilience comment
      command: iptables -L
      become: yes
      args:
        executable: /sbin
Inventory file:
[hosts]
host
[all:vars]
ansible_user=ansible_user
ansible_become_user=root
ansible_ssh_pass=pass
ansible_become=yes
but iptables is installed on the machine.
I checked more commands and found that all the commands in the /sbin folder are reported as not found.
What is the reason?
Thanks for helping!
"all the commands in /sbin folder not found. What the reason?"
The usual reason is the $PATH variable, which does not include the /sbin location. The simplest solution is to use the full path to the binary you want to run, so instead of attempting to invoke iptables you need to use /sbin/iptables.
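For example, a quick ad-hoc check with the full path (the hosts group comes from the inventory above; the inventory file name is an assumption):
ansible hosts -i inventory -b -m command -a '/sbin/iptables -L'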
Alternatively, and this may look like a better solution since it does not require you to hardcode paths or edit anything, you can set your own $PATH for the whole playbook, as documented in the Ansible FAQ:
environment:
  PATH: "{{ ansible_env.PATH }}:/thingy/bin"
  OTHER_ENV_VAR: its_new_value
Note that the above example appends the /thingy/bin path to the existing value of $PATH. You may want to prepend it instead, or replace the existing PATH completely if needed. Also note that ansible_env is normally populated by fact gathering (so you must not disable it), and the value of those variables depends on the user that did the gathering. If you change remote_user or become_user you might end up using wrong or different values for those variables.
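A hedged way to see what PATH the non-interactive shell spawned by Ansible actually contains, which usually makes the missing /sbin obvious (same assumption about the inventory file name as above):
# Compare the PATH seen by the connection user with the PATH seen under become:
ansible hosts -i inventory -m shell -a 'echo $PATH'
ansible hosts -i inventory -b -m shell -a 'echo $PATH'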