I was exploring Hyperledger Fabric but got multiple errors when trying to start the network. More details are given below.

This is my docker-compose.yaml file:
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
version: '2'

volumes:
  orderer1.workspace:
  orderer2.workspace:
  orderer3.workspace:
  orderer4.workspace:
  orderer5.workspace:
  peer1.developers.workspace:
  peer2.developers.workspace:
  peer1.accounts.workspace:
  peer2.accounts.workspace:
  peer1.hr.workspace:
  peer2.hr.workspace:
  peer1.marketing.workspace:
  peer2.marketing.workspace:

networks:
  byfn:

services:
  orderer1.workspace:
    extends:
      file: base.yaml
      service: orderer-base
    container_name: orderer1.workspace
    networks:
      - byfn
    volumes:
      - ./channel-artifacts/genesis.block:/var/hyperledger/orderer/orderer.genesis.block
      - ./crypto-config/ordererOrganizations/workspace/orderers/orderer1.workspace/msp:/var/hyperledger/orderer/msp
      - ./crypto-config/ordererOrganizations/workspace/orderers/orderer1.workspace/tls/:/var/hyperledger/orderer/tls
      - orderer1.workspace:/var/hyperledger/production/orderer
    ports:
      - 7050:7050
  orderer2.workspace:
    extends:
      file: base.yaml
      service: orderer-base
    container_name: orderer2.workspace
    networks:
      - byfn
    volumes:
      - ./channel-artifacts/genesis.block:/var/hyperledger/orderer/orderer.genesis.block
      - ./crypto-config/ordererOrganizations/workspace/orderers/orderer2.workspace/msp:/var/hyperledger/orderer/msp
      - ./crypto-config/ordererOrganizations/workspace/orderers/orderer2.workspace/tls/:/var/hyperledger/orderer/tls
      - orderer2.workspace:/var/hyperledger/production/orderer
    ports:
      - 8050:7050
  orderer3.workspace:
    extends:
      file: base.yaml
      service: orderer-base
    container_name: orderer3.workspace
    networks:
      - byfn
    volumes:
      - ./channel-artifacts/genesis.block:/var/hyperledger/orderer/orderer.genesis.block
      - ./crypto-config/ordererOrganizations/workspace/orderers/orderer3.workspace/msp:/var/hyperledger/orderer/msp
      - ./crypto-config/ordererOrganizations/workspace/orderers/orderer3.workspace/tls/:/var/hyperledger/orderer/tls
      - orderer3.workspace:/var/hyperledger/production/orderer
    ports:
      - 9050:7050
  orderer4.workspace:
    extends:
      file: base.yaml
      service: orderer-base
    container_name: orderer4.workspace
    networks:
      - byfn
    volumes:
      - ./channel-artifacts/genesis.block:/var/hyperledger/orderer/orderer.genesis.block
      - ./crypto-config/ordererOrganizations/workspace/orderers/orderer4.workspace/msp:/var/hyperledger/orderer/msp
      - ./crypto-config/ordererOrganizations/workspace/orderers/orderer4.workspace/tls/:/var/hyperledger/orderer/tls
      - orderer4.workspace:/var/hyperledger/production/orderer
    ports:
      - 10050:7050
  orderer5.workspace:
    extends:
      file: base.yaml
      service: orderer-base
    container_name: orderer5.workspace
    networks:
      - byfn
    volumes:
      - ./channel-artifacts/genesis.block:/var/hyperledger/orderer/orderer.genesis.block
      - ./crypto-config/ordererOrganizations/workspace/orderers/orderer5.workspace/msp:/var/hyperledger/orderer/msp
      - ./crypto-config/ordererOrganizations/workspace/orderers/orderer5.workspace/tls/:/var/hyperledger/orderer/tls
      - orderer5.workspace:/var/hyperledger/production/orderer
    ports:
      - 11050:7050
  peer1.developers.workspace:
    container_name: peer1.developers.workspace
    extends:
      file: base.yaml
      service: peer-base
    environment:
      - CORE_PEER_ID=peer1.developers.workspace
      - CORE_PEER_ADDRESS=peer1.developers.workspace:7051
      - CORE_PEER_LISTENADDRESS=0.0.0.0:7051
      - CORE_PEER_CHAINCODEADDRESS=peer1.developers.workspace:7052
      - CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:7052
      - CORE_PEER_GOSSIP_BOOTSTRAP=peer2.developers.workspace:8051
      - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer1.developers.workspace:7051
      - CORE_PEER_LOCALMSPID=Org1MSP
    volumes:
      - /var/run/:/host/var/run/
      - ./crypto-config/peerOrganizations/developers.workspace/peers/peer1.developers.workspace/msp:/etc/hyperledger/fabric/msp
      - ./crypto-config/peerOrganizations/developers.workspace/peers/peer1.developers.workspace/tls:/etc/hyperledger/fabric/tls
      - peer1.developers.workspace:/var/hyperledger/production
    ports:
      - 7051:7051
    networks:
      - byfn
  peer2.developers.workspace:
    container_name: peer2.developers.workspace
    extends:
      file: base.yaml
      service: peer-base
    environment:
      - CORE_PEER_ID=peer2.developers.workspace
      - CORE_PEER_ADDRESS=peer2.developers.workspace:8051
      - CORE_PEER_LISTENADDRESS=0.0.0.0:8051
      - CORE_PEER_CHAINCODEADDRESS=peer2.developers.workspace:8052
      - CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:7052
      - CORE_PEER_GOSSIP_BOOTSTRAP=peer1.developers.workspace:7051
      - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer2.developers.workspace:8051
      - CORE_PEER_LOCALMSPID=Org1MSP
    volumes:
      - /var/run/:/host/var/run/
      - ./crypto-config/peerOrganizations/developers.workspace/peers/peer2.developers.workspace/msp:/etc/hyperledger/fabric/msp
      - ./crypto-config/peerOrganizations/developers.workspace/peers/peer2.developers.workspace/tls:/etc/hyperledger/fabric/tls
      - peer2.developers.workspace:/var/hyperledger/production
    ports:
      - 8051:8051
    networks:
      - byfn
  peer1.accounts.workspace:
    container_name: peer1.accounts.workspace
    extends:
      file: base.yaml
      service: peer-base
    environment:
      - CORE_PEER_ID=peer1.accounts.workspace
      - CORE_PEER_ADDRESS=peer1.accounts.workspace:9051
      - CORE_PEER_LISTENADDRESS=0.0.0.0:9051
      - CORE_PEER_CHAINCODEADDRESS=peer1.accounts.workspace:9052
      - CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:9052
      - CORE_PEER_GOSSIP_BOOTSTRAP=peer2.accounts.workspace:10051
      - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer1.accounts.workspace:9051
      - CORE_PEER_LOCALMSPID=Org2MSP
    volumes:
      - /var/run/:/host/var/run/
      - ./crypto-config/peerOrganizations/accounts.workspace/peers/peer1.accounts.workspace/msp:/etc/hyperledger/fabric/msp
      - ./crypto-config/peerOrganizations/accounts.workspace/peers/peer1.accounts.workspace/tls:/etc/hyperledger/fabric/tls
      - peer1.accounts.workspace:/var/hyperledger/production
    ports:
      - 9051:9051
    networks:
      - byfn
  peer2.accounts.workspace:
    container_name: peer2.accounts.workspace
    extends:
      file: base.yaml
      service: peer-base
    environment:
      - CORE_PEER_ID=peer2.accounts.workspace
      - CORE_PEER_ADDRESS=peer2.accounts.workspace:10051
      - CORE_PEER_LISTENADDRESS=0.0.0.0:10051
      - CORE_PEER_CHAINCODEADDRESS=peer2.accounts.workspace:10052
      - CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:10052
      - CORE_PEER_GOSSIP_BOOTSTRAP=peer1.accounts.workspace:9051
      - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer2.accounts.workspace:10051
      - CORE_PEER_LOCALMSPID=Org2MSP
    volumes:
      - /var/run/:/host/var/run/
      - ./crypto-config/peerOrganizations/accounts.workspace/peers/peer2.accounts.workspace/msp:/etc/hyperledger/fabric/msp
      - ./crypto-config/peerOrganizations/accounts.workspace/peers/peer2.accounts.workspace/tls:/etc/hyperledger/fabric/tls
      - peer2.accounts.workspace:/var/hyperledger/production
    ports:
      - 10051:10051
    networks:
      - byfn
  peer1.hr.workspace:
    container_name: peer1.hr.workspace
    extends:
      file: base.yaml
      service: peer-base
    environment:
      - CORE_PEER_ID=peer1.hr.workspace
      - CORE_PEER_ADDRESS=peer1.hr.workspace:11051
      - CORE_PEER_LISTENADDRESS=0.0.0.0:11051
      - CORE_PEER_CHAINCODEADDRESS=peer1.hr.workspace:11052
      - CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:11052
      - CORE_PEER_GOSSIP_BOOTSTRAP=peer2.hr.workspace:12051
      - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer1.hr.workspace:11051
      - CORE_PEER_LOCALMSPID=Org3MSP
    volumes:
      - /var/run/:/host/var/run/
      - ./crypto-config/peerOrganizations/hr.workspace/peers/peer1.hr.workspace/msp:/etc/hyperledger/fabric/msp
      - ./crypto-config/peerOrganizations/hr.workspace/peers/peer1.hr.workspace/tls:/etc/hyperledger/fabric/tls
      - peer1.hr.workspace:/var/hyperledger/production
    ports:
      - 11051:11051
    networks:
      - byfn
  peer2.hr.workspace:
    container_name: peer2.hr.workspace
    extends:
      file: base.yaml
      service: peer-base
    environment:
      - CORE_PEER_ID=peer2.hr.workspace
      - CORE_PEER_ADDRESS=peer2.hr.workspace:12051
      - CORE_PEER_LISTENADDRESS=0.0.0.0:12051
      - CORE_PEER_CHAINCODEADDRESS=peer2.hr.workspace:12052
      - CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:12052
      - CORE_PEER_GOSSIP_BOOTSTRAP=peer1.hr.workspace:11051
      - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer2.hr.workspace:12051
      - CORE_PEER_LOCALMSPID=Org3MSP
    volumes:
      - /var/run/:/host/var/run/
      - ./crypto-config/peerOrganizations/hr.workspace/peers/peer2.hr.workspace/msp:/etc/hyperledger/fabric/msp
      - ./crypto-config/peerOrganizations/hr.workspace/peers/peer2.hr.workspace/tls:/etc/hyperledger/fabric/tls
      - peer2.hr.workspace:/var/hyperledger/production
    ports:
      - 12051:12051
    networks:
      - byfn
  peer1.marketing.workspace:
    container_name: peer1.marketing.workspace
    extends:
      file: base.yaml
      service: peer-base
    environment:
      - CORE_PEER_ID=peer1.marketing.workspace
      - CORE_PEER_ADDRESS=peer1.marketing.workspace:13051
      - CORE_PEER_LISTENADDRESS=0.0.0.0:13051
      - CORE_PEER_CHAINCODEADDRESS=peer1.marketing.workspace:13052
      - CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:13052
      - CORE_PEER_GOSSIP_BOOTSTRAP=peer2.marketing.workspace:14051
      - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer1.marketing.workspace:13051
      - CORE_PEER_LOCALMSPID=Org4MSP
    volumes:
      - /var/run/:/host/var/run/
      - ./crypto-config/peerOrganizations/marketing.workspace/peers/peer1.marketing.workspace/msp:/etc/hyperledger/fabric/msp
      - ./crypto-config/peerOrganizations/marketing.workspace/peers/peer1.marketing.workspace/tls:/etc/hyperledger/fabric/tls
      - peer1.marketing.workspace:/var/hyperledger/production
    ports:
      - 13051:13051
    networks:
      - byfn
  peer2.marketing.workspace:
    container_name: peer2.marketing.workspace
    extends:
      file: base.yaml
      service: peer-base
    environment:
      - CORE_PEER_ID=peer2.marketing.workspace
      - CORE_PEER_ADDRESS=peer2.marketing.workspace:14051
      - CORE_PEER_LISTENADDRESS=0.0.0.0:14051
      - CORE_PEER_CHAINCODEADDRESS=peer2.marketing.workspace:14052
      - CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:14052
      - CORE_PEER_GOSSIP_BOOTSTRAP=peer1.marketing.workspace:13051
      - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer2.marketing.workspace:14051
      - CORE_PEER_LOCALMSPID=Org4MSP
    volumes:
      - /var/run/:/host/var/run/
      - ./crypto-config/peerOrganizations/marketing.workspace/peers/peer2.marketing.workspace/msp:/etc/hyperledger/fabric/msp
      - ./crypto-config/peerOrganizations/marketing.workspace/peers/peer2.marketing.workspace/tls:/etc/hyperledger/fabric/tls
      - peer2.marketing.workspace:/var/hyperledger/production
    ports:
      - 14051:14051
    networks:
      - byfn
  cli:
    container_name: cli
    image: hyperledger/fabric-tools:$IMAGE_TAG
    tty: true
    stdin_open: true
    environment:
      - SYS_CHANNEL=$SYS_CHANNEL
      - GOPATH=/opt/gopath
      - CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock
      #- FABRIC_LOGGING_SPEC=DEBUG
      - FABRIC_LOGGING_SPEC=INFO
      - CORE_PEER_ID=cli
      - CORE_PEER_ADDRESS=peer1.developers.workspace:7051
      - CORE_PEER_LOCALMSPID=Org1MSP
      - CORE_PEER_TLS_ENABLED=true
      - CORE_PEER_TLS_CERT_FILE=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/developers.workspace/peers/peer1.developers.workspace/tls/server.crt
      - CORE_PEER_TLS_KEY_FILE=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/developers.workspace/peers/peer1.developers.workspace/tls/server.key
      - CORE_PEER_TLS_ROOTCERT_FILE=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/developers.workspace/peers/peer1.developers.workspace/tls/ca.crt
      - CORE_PEER_MSPCONFIGPATH=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/developers.workspace/users/Admin@developers.workspace/msp
    working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer
    command: /bin/bash
    volumes:
      - /var/run/:/host/var/run/
      - ./../chaincode/:/opt/gopath/src/github.com/chaincode
      - ./crypto-config:/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/
      - ./scripts:/opt/gopath/src/github.com/hyperledger/fabric/peer/scripts/
      - ./myscripts:/opt/gopath/src/github.com/hyperledger/fabric/peer/myscripts/
      - ./channel-artifacts:/opt/gopath/src/github.com/hyperledger/fabric/peer/channel-artifacts
    depends_on:
      - orderer1.workspace
      - orderer2.workspace
      - orderer3.workspace
      - orderer4.workspace
      - orderer5.workspace
      - peer1.developers.workspace
      - peer2.developers.workspace
      - peer1.accounts.workspace
      - peer2.accounts.workspace
      - peer1.hr.workspace
      - peer2.hr.workspace
      - peer1.marketing.workspace
      - peer2.marketing.workspace
    networks:
      - byfn
  # ca1:
  #   extends:
  #     file: base.yaml
  #     service: ca-base
  #   environment:
  #     - FABRIC_CA_SERVER_CA_NAME=ca-developers
  #     - FABRIC_CA_SERVER_TLS_CERTFILE=/etc/hyperledger/fabric-ca-server-config/ca.developers.workspace-cert.pem
  #     - FABRIC_CA_SERVER_TLS_KEYFILE=/etc/hyperledger/fabric-ca-server-config/priv_sk
  #     - FABRIC_CA_SERVER_PORT=7054
  #   ports:
  #     - "7054:7054"
  #   command: sh -c 'fabric-ca-server start --ca.certfile /etc/hyperledger/fabric-ca-server-config/ca.developers.workspace-cert.pem --ca.keyfile /etc/hyperledger/fabric-ca-server-config/a75d012725ef434f86c9d31f02e748922f9d81f0dfcbb9e4890f1dfbd69a0424_sk -b admin:adminpw -d'
  #   volumes:
  #     - ./crypto-config/peerOrganizations/developers.workspace/ca/:/etc/hyperledger/fabric-ca-server-config
  #   container_name: ca_developers
  # ca2:
  #   extends:
  #     file: base.yaml
  #     service: ca-base
  #   environment:
  #     - FABRIC_CA_SERVER_CA_NAME=ca-accounts
  #     - FABRIC_CA_SERVER_TLS_CERTFILE=/etc/hyperledger/fabric-ca-server-config/ca.accounts.workspace-cert.pem
  #     - FABRIC_CA_SERVER_TLS_KEYFILE=/etc/hyperledger/fabric-ca-server-config/priv_sk
  #     - FABRIC_CA_SERVER_PORT=9054
  #   ports:
  #     - "9054:9054"
  #   command: sh -c 'fabric-ca-server start --ca.certfile /etc/hyperledger/fabric-ca-server-config/ca.accounts.workspace-cert.pem --ca.keyfile /etc/hyperledger/fabric-ca-server-config/caf48a1b5c6e3d0afa0ef05ff9e42dd890f7a64299d7a7a1cd0da301ffc65263_sk -b admin:adminpw -d'
  #   volumes:
  #     - ./crypto-config/peerOrganizations/accounts.workspace/ca/:/etc/hyperledger/fabric-ca-server-config
  #   container_name: ca_finance
  # ca3:
  #   extends:
  #     file: base.yaml
  #     service: ca-base
  #   environment:
  #     - FABRIC_CA_SERVER_CA_NAME=ca-hr
  #     - FABRIC_CA_SERVER_TLS_CERTFILE=/etc/hyperledger/fabric-ca-server-config/ca.hr.workspace-cert.pem
  #     - FABRIC_CA_SERVER_TLS_KEYFILE=/etc/hyperledger/fabric-ca-server-config/priv_sk
  #     - FABRIC_CA_SERVER_PORT=11054
  #   ports:
  #     - "11054:11054"
  #   command: sh -c 'fabric-ca-server start --ca.certfile /etc/hyperledger/fabric-ca-server-config/ca.hr.workspace-cert.pem --ca.keyfile /etc/hyperledger/fabric-ca-server-config/973b6fbc8397b467ec76dc32ad61104ac77034a1e2de1b98dedbb787c0540def_sk -b admin:adminpw -d'
  #   volumes:
  #     - ./crypto-config/peerOrganizations/hr.workspace/ca/:/etc/hyperledger/fabric-ca-server-config
  #   container_name: ca_hr
  # ca4:
  #   extends:
  #     file: base.yaml
  #     service: ca-base
  #   environment:
  #     - FABRIC_CA_SERVER_CA_NAME=ca-marketing
  #     - FABRIC_CA_SERVER_TLS_CERTFILE=/etc/hyperledger/fabric-ca-server-config/ca.marketing.workspace-cert.pem
  #     - FABRIC_CA_SERVER_TLS_KEYFILE=/etc/hyperledger/fabric-ca-server-config/priv_sk
  #     - FABRIC_CA_SERVER_PORT=13054
  #   ports:
  #     - "13054:13054"
  #   command: sh -c 'fabric-ca-server start --ca.certfile /etc/hyperledger/fabric-ca-server-config/ca.marketing.workspace-cert.pem --ca.keyfile /etc/hyperledger/fabric-ca-server-config/82b1b4f6bf80f8b948f8aac606ae7a46f8605b221e309b8cc3edea307020e56d_sk -b admin:adminpw -d'
  #   volumes:
  #     - ./crypto-config/peerOrganizations/marketing.workspace/ca/:/etc/hyperledger/fabric-ca-server-config
  #   container_name: ca_marketing
  #   networks:
  #     - byfn
I tried to start the network using the command mentioned below:
docker-compose -f docker-compose.yaml up -d
This is the error message I got:
WARNING: Found orphan containers (ca_cbaccounts, peer2.apple.workspace, ca_apple, peer1.apple.workspace, peer1.citizenbank.workspace, peer2.fiserv.workspace, ca_finance, peer1.cbaccounts.workspace, peer2.cbaccounts.workspace, peer2.citizenbank.workspace, ca_citizenbank, peer1.fiserv.workspace) for this project. If you removed or renamed this service in your compose file, you can run this command with the --remove-orphans flag to clean it up.
Starting peer2.accounts.workspace ...
Starting peer1.accounts.workspace ...
Starting orderer1.workspace ...
orderer3.workspace is up-to-date
Starting peer2.hr.workspace ...
Starting peer1.marketing.workspace ...
Starting peer2.marketing.workspace ...
orderer2.workspace is up-to-date
Starting peer2.developers.workspace ...
Starting peer1.hr.workspace ...
Starting peer1.developers.workspace ...
orderer4.workspace is up-to-date
orderer5.workspace is up-to-date
ERROR: for peer2.accounts.workspace a bytes-like object is required, not 'str'
ERROR: for peer1.accounts.workspace a bytes-like object is required, not 'str'
ERROR: for peer1.marketing.workspace a bytes-like object is required, not 'str'
ERROR: for peer2.hr.workspace a bytes-like object is required, not 'str'
Starting orderer1.workspace ... done
ERROR: for peer1.developers.workspace a bytes-like object is required, not 'str'
ERROR: for peer2.marketing.workspace a bytes-like object is required, not 'str'
ERROR: for peer2.developers.workspace a bytes-like object is required, not 'str'
ERROR: for peer1.hr.workspace a bytes-like object is required, not 'str'
ERROR: for peer2.accounts.workspace a bytes-like object is required, not 'str'
ERROR: for peer1.accounts.workspace a bytes-like object is required, not 'str'
ERROR: for peer1.marketing.workspace a bytes-like object is required, not 'str'
ERROR: for peer2.hr.workspace a bytes-like object is required, not 'str'
ERROR: for peer1.developers.workspace a bytes-like object is required, not 'str'
ERROR: for peer2.marketing.workspace a bytes-like object is required, not 'str'
ERROR: for peer2.developers.workspace a bytes-like object is required, not 'str'
ERROR: for peer1.hr.workspace a bytes-like object is required, not 'str'
Traceback (most recent call last):
  File "/usr/lib/python3/dist-packages/docker/api/client.py", line 261, in _raise_for_status
    response.raise_for_status()
  File "/usr/lib/python3/dist-packages/requests/models.py", line 940, in raise_for_status
    raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 500 Server Error: Internal Server Error for url: http+docker://localhost/v1.22/containers/f3081941fb6114633593ec5d8b4f258ecebe5829007027295c30a284ed7a3c8c/start

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/usr/lib/python3/dist-packages/compose/service.py", line 625, in start_container
    container.start()
  File "/usr/lib/python3/dist-packages/compose/container.py", line 241, in start
    return self.client.start(self.id, **options)
  File "/usr/lib/python3/dist-packages/docker/utils/decorators.py", line 19, in wrapped
    return f(self, resource_id, *args, **kwargs)
  File "/usr/lib/python3/dist-packages/docker/api/container.py", line 1095, in start
    self._raise_for_status(res)
  File "/usr/lib/python3/dist-packages/docker/api/client.py", line 263, in _raise_for_status
    raise create_api_error_from_http_exception(e)
  File "/usr/lib/python3/dist-packages/docker/errors.py", line 31, in create_api_error_from_http_exception
    raise cls(e, response=response, explanation=explanation)
docker.errors.APIError: 500 Server Error: Internal Server Error ("b'driver failed programming external connectivity on endpoint peer1.hr.workspace (fcfb797b4e0e47c5d3611d652e56f06b9d5f4cdaad74b0663729c3773c39030a): Bind for 0.0.0.0:11051 failed: port is already allocated'")

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/usr/bin/docker-compose", line 11, in <module>
    load_entry_point('docker-compose==1.25.0', 'console_scripts', 'docker-compose')()
  File "/usr/lib/python3/dist-packages/compose/cli/main.py", line 72, in main
    command()
  File "/usr/lib/python3/dist-packages/compose/cli/main.py", line 128, in perform_command
    handler(command, command_options)
  File "/usr/lib/python3/dist-packages/compose/cli/main.py", line 1107, in up
    to_attach = up(False)
  File "/usr/lib/python3/dist-packages/compose/cli/main.py", line 1088, in up
    return self.project.up(
  File "/usr/lib/python3/dist-packages/compose/project.py", line 565, in up
    results, errors = parallel.parallel_execute(
  File "/usr/lib/python3/dist-packages/compose/parallel.py", line 112, in parallel_execute
    raise error_to_reraise
  File "/usr/lib/python3/dist-packages/compose/parallel.py", line 210, in producer
    result = func(obj)
  File "/usr/lib/python3/dist-packages/compose/project.py", line 548, in do
    return service.execute_convergence_plan(
  File "/usr/lib/python3/dist-packages/compose/service.py", line 567, in execute_convergence_plan
    return self._execute_convergence_start(
  File "/usr/lib/python3/dist-packages/compose/service.py", line 506, in _execute_convergence_start
    _, errors = parallel_execute(
  File "/usr/lib/python3/dist-packages/compose/parallel.py", line 112, in parallel_execute
    raise error_to_reraise
  File "/usr/lib/python3/dist-packages/compose/parallel.py", line 210, in producer
    result = func(obj)
  File "/usr/lib/python3/dist-packages/compose/service.py", line 508, in <lambda>
    lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
  File "/usr/lib/python3/dist-packages/compose/service.py", line 620, in start_container_if_stopped
    return self.start_container(container)
  File "/usr/lib/python3/dist-packages/compose/service.py", line 627, in start_container
    if "driver failed programming external connectivity" in ex.explanation:
TypeError: a bytes-like object is required, not 'str'
using:
Python 3.8.10
docker:
Client: Docker Engine - Community
 Version:           20.10.20
 API version:       1.41
 Go version:        go1.18.7
 Git commit:        9fdeb9c
 Built:             Tue Oct 18 18:20:23 2022
 OS/Arch:           linux/amd64
 Context:           default
 Experimental:      true

Server: Docker Engine - Community
 Engine:
  Version:          20.10.20
  API version:      1.41 (minimum version 1.12)
  Go version:       go1.18.7
  Git commit:       03df974
  Built:            Tue Oct 18 18:18:12 2022
  OS/Arch:          linux/amd64
  Experimental:     false
 containerd:
  Version:          1.6.8
  GitCommit:        9cd3357b7fd7218e4aec3eae239db1f68a5a6ec6
 runc:
  Version:          1.1.4
  GitCommit:        v1.1.4-0-g5fd4c4d
 docker-init:
  Version:          0.19.0
  GitCommit:        de40ad0
This doesn't seem to be an issue with Fabric itself.

I just removed all the Docker containers and executed the docker-compose -f docker-compose.yaml up -d command again, and it worked. Maybe I was facing the issue because of the orphaned containers.
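For reference, the TypeError at the end of the traceback is docker-compose 1.25 tripping over its own error handling; the real failure is the one visible further up: Bind for 0.0.0.0:11051 failed: port is already allocated. A cleanup along these lines should reproduce the fix (the --remove-orphans flag is the one the warning message itself suggests):

docker-compose -f docker-compose.yaml down --remove-orphans   # stop the stack and remove leftover containers
docker ps --filter "publish=11051"                            # verify nothing is still holding a peer port
docker-compose -f docker-compose.yaml up -d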
Let me know your views in the comments. Thanks.

Related

failed to resolve image name: short-name "caddy:2-alpine"

I get this error when running docker-compose up:
ERROR: failed to resolve image name: short-name "caddy:2-alpine" did not resolve to an alias and no unqualified-search registries are defined in "/etc/containers/registries.conf"
Here is my docker-compose.yaml file:
version: "3"
#networks:
# web:
# external: true
# bridge:
# driver: bridge
services:
# CaddyServer reverse proxy
caddy:
restart: always
image: caddy:2-alpine
ports:
- "443:443"
command: caddy reverse-proxy --from https://xxxxxx.com --to http://0.0.0.0:8000
#volumes:
# - /local/path/to/Caddyfile:/path/inside/continer/to/Caddyfile
# networks:
# - web
# - bridge
# Django web app
django:
restart: always
build: .
ports:
- "80:8000"
depends_on:
- pgdb
#environment:
# - url=https://api.backend.example.com
#command: "gunicorn config.wsgi:application --bind 0.0.0.0:8000"
#networks:
# - bridge
pgdb:
image: postgres
container_name: pgdb
environment:
- POSTGRES_DB=xxxxx
- POSTGRES_USER=xxxx
- POSTGRES_PASSWORD=xxxx
volumes:
- pg-data:/var/lib/postgresql/data/
volumes:
pg-data:
I was getting the error short-name "postgres" did not resolve to an alias and no unqualified-search registries are defined in "/etc/containers/registries.conf".
The problem was that my Docker was not properly installed.
https://www.simplilearn.com/tutorials/docker-tutorial/how-to-install-docker-on-ubuntu
I followed this page and reinstalled Docker.
That solved it for me.
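If reinstalling is not an option: this error comes from Podman-style short-name resolution, and it can usually also be avoided by fully qualifying the image names, since a fully qualified reference never consults registries.conf. A minimal sketch against the compose file above:

services:
  caddy:
    image: docker.io/library/caddy:2-alpine
  pgdb:
    image: docker.io/library/postgres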

OIDC Redirect URI Error in Dockerized Django

I'm running two applications using docker-compose. Each application has a bunch of containers. The intention is for App A (django app) to host the OIDC provider, while App B (some other app) will authenticate users by calling the App A API.
I'm using the django-oidc-provider library (https://django-oidc-provider.readthedocs.io/en/latest/index.html)
I've already configured the OIDC integration on both sides. However, every time App B redirects to App A, I hit the following error:
Redirect URI Error
The request fails due to a missing, invalid, or mismatching redirection URI (redirect_uri).
Even though the redirect_uri matches exactly on both sides.
Here's my docker-compose.yml:
version: '3'
networks:
  default:
    external:
      name: datahub-gms_default
services:
  django:
    build:
      context: .
      dockerfile: ./compose/local/django/Dockerfile
    image: dqt
    container_name: dqt
    hostname: dqt
    platform: linux/x86_64
    depends_on:
      - postgres
    volumes:
      - .:/app:z
    environment:
      - DJANGO_READ_DOT_ENV_FILE=true
    env_file:
      - ./.envs/.local/.django
      - ./.envs/.local/.postgres
    ports:
      - "8000:8000"
    command: /start
  postgres:
    build:
      context: .
      dockerfile: ./compose/local/postgres/Dockerfile
    image: postgres
    container_name: postgres
    hostname: postgres
    volumes:
      - dqt_local_postgres_data:/var/lib/postgresql/data:Z
      - dqt_local_postgres_data_backups:/backups:z
    env_file:
      - ./.envs/.local/.postgres
  broker:
    container_name: broker
    depends_on:
      - zookeeper
    environment:
      - KAFKA_BROKER_ID=1
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
      - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092
      - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1
      - KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS=0
      - KAFKA_HEAP_OPTS=-Xms256m -Xmx256m
    hostname: broker
    image: confluentinc/cp-kafka:5.4.0
    ports:
      - 29092:29092
      - 9092:9092
  datahub-actions:
    depends_on:
      - datahub-gms
    environment:
      - GMS_HOST=datahub-gms
      - GMS_PORT=8080
      - KAFKA_BOOTSTRAP_SERVER=broker:29092
      - SCHEMA_REGISTRY_URL=http://schema-registry:8081
      - METADATA_AUDIT_EVENT_NAME=MetadataAuditEvent_v4
      - METADATA_CHANGE_LOG_VERSIONED_TOPIC_NAME=MetadataChangeLog_Versioned_v1
      - DATAHUB_SYSTEM_CLIENT_ID=__datahub_system
      - DATAHUB_SYSTEM_CLIENT_SECRET=JohnSnowKnowsNothing
      - KAFKA_PROPERTIES_SECURITY_PROTOCOL=PLAINTEXT
    hostname: actions
    image: public.ecr.aws/datahub/acryl-datahub-actions:${ACTIONS_VERSION:-head}
  datahub-frontend-react:
    container_name: datahub-frontend-react
    depends_on:
      - datahub-gms
    environment:
      - DATAHUB_GMS_HOST=datahub-gms
      - DATAHUB_GMS_PORT=8080
      - DATAHUB_SECRET=YouKnowNothing
      - DATAHUB_APP_VERSION=1.0
      - DATAHUB_PLAY_MEM_BUFFER_SIZE=10MB
      - JAVA_OPTS=-Xms512m -Xmx512m -Dhttp.port=9002 -Dconfig.file=datahub-frontend/conf/application.conf
        -Djava.security.auth.login.config=datahub-frontend/conf/jaas.conf -Dlogback.configurationFile=datahub-frontend/conf/logback.xml
        -Dlogback.debug=false -Dpidfile.path=/dev/null
      - KAFKA_BOOTSTRAP_SERVER=broker:29092
      - DATAHUB_TRACKING_TOPIC=DataHubUsageEvent_v1
      - ELASTIC_CLIENT_HOST=elasticsearch
      - ELASTIC_CLIENT_PORT=9200
      - AUTH_OIDC_ENABLED=true
      - AUTH_OIDC_CLIENT_ID=778948
      - AUTH_OIDC_CLIENT_SECRET=some-client-secret
      - AUTH_OIDC_DISCOVERY_URI=http://dqt:8000/openid/.well-known/openid-configuration/
      - AUTH_OIDC_BASE_URL=http://datahub:9002/
    hostname: datahub
    image: linkedin/datahub-frontend-react:${DATAHUB_VERSION:-head}
    ports:
      - 9002:9002
  datahub-gms:
    container_name: datahub-gms
    depends_on:
      - mysql
    environment:
      - DATASET_ENABLE_SCSI=false
      - EBEAN_DATASOURCE_USERNAME=datahub
      - EBEAN_DATASOURCE_PASSWORD=datahub
      - EBEAN_DATASOURCE_HOST=mysql:3306
      - EBEAN_DATASOURCE_URL=jdbc:mysql://mysql:3306/datahub?verifyServerCertificate=false&useSSL=true&useUnicode=yes&characterEncoding=UTF-8
      - EBEAN_DATASOURCE_DRIVER=com.mysql.jdbc.Driver
      - KAFKA_BOOTSTRAP_SERVER=broker:29092
      - KAFKA_SCHEMAREGISTRY_URL=http://schema-registry:8081
      - ELASTICSEARCH_HOST=elasticsearch
      - ELASTICSEARCH_PORT=9200
      - GRAPH_SERVICE_IMPL=elasticsearch
      - JAVA_OPTS=-Xms1g -Xmx1g
      - ENTITY_REGISTRY_CONFIG_PATH=/datahub/datahub-gms/resources/entity-registry.yml
      - MAE_CONSUMER_ENABLED=true
      - MCE_CONSUMER_ENABLED=true
    hostname: datahub-gms
    image: linkedin/datahub-gms:${DATAHUB_VERSION:-head}
    ports:
      - 8080:8080
    volumes:
      - ${HOME}/.datahub/plugins:/etc/datahub/plugins
  elasticsearch:
    container_name: elasticsearch
    environment:
      - discovery.type=single-node
      - xpack.security.enabled=false
      - ES_JAVA_OPTS=-Xms256m -Xmx256m -Dlog4j2.formatMsgNoLookups=true
    healthcheck:
      retries: 4
      start_period: 2m
      test:
        - CMD-SHELL
        - curl -sS --fail 'http://localhost:9200/_cluster/health?wait_for_status=yellow&timeout=0s' || exit 1
    hostname: elasticsearch
    image: elasticsearch:7.9.3
    mem_limit: 1g
    ports:
      - 9200:9200
    volumes:
      - esdata:/usr/share/elasticsearch/data
  elasticsearch-setup:
    container_name: elasticsearch-setup
    depends_on:
      - elasticsearch
    environment:
      - ELASTICSEARCH_HOST=elasticsearch
      - ELASTICSEARCH_PORT=9200
      - ELASTICSEARCH_PROTOCOL=http
    hostname: elasticsearch-setup
    image: linkedin/datahub-elasticsearch-setup:${DATAHUB_VERSION:-head}
  kafka-setup:
    container_name: kafka-setup
    depends_on:
      - broker
      - schema-registry
    environment:
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
      - KAFKA_BOOTSTRAP_SERVER=broker:29092
    hostname: kafka-setup
    image: linkedin/datahub-kafka-setup:${DATAHUB_VERSION:-head}
  mysql:
    command: --character-set-server=utf8mb4 --collation-server=utf8mb4_bin
    container_name: mysql
    environment:
      - MYSQL_DATABASE=datahub
      - MYSQL_USER=datahub
      - MYSQL_PASSWORD=datahub
      - MYSQL_ROOT_PASSWORD=datahub
    hostname: mysql
    image: mysql:5.7
    ports:
      - 3306:3306
    volumes:
      - ../mysql/init.sql:/docker-entrypoint-initdb.d/init.sql
      - mysqldata:/var/lib/mysql
  mysql-setup:
    container_name: mysql-setup
    depends_on:
      - mysql
    environment:
      - MYSQL_HOST=mysql
      - MYSQL_PORT=3306
      - MYSQL_USERNAME=datahub
      - MYSQL_PASSWORD=datahub
      - DATAHUB_DB_NAME=datahub
    hostname: mysql-setup
    image: acryldata/datahub-mysql-setup:head
  schema-registry:
    container_name: schema-registry
    depends_on:
      - zookeeper
      - broker
    environment:
      - SCHEMA_REGISTRY_HOST_NAME=schemaregistry
      - SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL=zookeeper:2181
    hostname: schema-registry
    image: confluentinc/cp-schema-registry:5.4.0
    ports:
      - 8081:8081
  zookeeper:
    container_name: zookeeper
    environment:
      - ZOOKEEPER_CLIENT_PORT=2181
      - ZOOKEEPER_TICK_TIME=2000
    hostname: zookeeper
    image: confluentinc/cp-zookeeper:5.4.0
    ports:
      - 2181:2181
    volumes:
      - zkdata:/var/opt/zookeeper
volumes:
  dqt_local_postgres_data: {}
  dqt_local_postgres_data_backups: {}
  esdata: null
  mysqldata: null
  zkdata: null
In the above, container datahub-frontend-react is supposed to integrate into container dqt for the OIDC authentication.
The Docker log doesn't show any exceptions, and the HTTP status code is 200:
dqt | [28/Feb/2022 10:43:43] "GET /openid/.well-known/openid-configuration/ HTTP/1.1" 200 682
dqt | [28/Feb/2022 10:43:44] "GET /openid/authorize?response_type=code&redirect_uri=http%3A%2F%2Fdatahub%3A9002%2F%2Fcallback%2Foidc&state=9Fj1Bog-ZN8fhN2kufWng2fRGaqCYnkMz6n3yKxPowo&client_id=778948&scope=openid+profile+email HTTP/1.1" 200 126
Here's the redirect_uri configuration in django admin:
I'm suspecting it could be related to the fact that they are different containers with different hostnames (I don't know what to do about that).
What could be the root cause of this issue?
Your log shows that the app is redirecting with this login URL, which contains two %2F characters, so the URL used by the app is different from the one configured:
http://datahub:9002//callback/oidc
INTERNAL AND EXTERNAL URLs
Not sure if it will work once you resolve that though, since the callback URL looks like a Docker Compose internal URL that the browser will be unable to reach. Aim to return a URL such as this instead:
http://localhost:9002/callback/oidc
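In the compose file above, the doubled slash most likely comes from the trailing slash on AUTH_OIDC_BASE_URL being joined with /callback/oidc. A minimal sketch of the change, assuming the browser reaches the frontend on localhost:9002:

- AUTH_OIDC_BASE_URL=http://localhost:9002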
One option that can be useful to make URLs more understandable during development, and to plan the real deployment, is to add custom host names to your computer's hosts file. You can then login via URLs such as http://www.myapp.com, which I find clearer.
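For example, a minimal hosts-file entry for that approach (the host name is purely illustrative):

# /etc/hosts
127.0.0.1   www.myapp.com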
See these resources for something to compare against, which describe a setup with both internal and external URLs.
Custom Hosts
Docker Compose Example

yaml: line 8: did not find expected key

This is my first time with Docker. I'm trying to put the directives for MariaDB in my compose file. Once the file is written, if I try to run the docker compose up command, it gives me the following error: yaml: line 8: did not find expected key. Does anyone have the same problem as me? How can I solve it? Thanks so much.
Below is my docker-compose.yaml file
version: '3'
services:
  backend:
    build: ./
    restart: always
    volumes:
      - ./application:/var/www/html
    ports: [80:80]
  mariadb:
    image: 'bitnami/mariadb:latest'
    ports:
      - '3306:3306'
    volumes:
      - './mariadb_data:/bitnami/mariadb'
    environment:
      - ALLOW_EMPTY_PASSWORD=yes
      - MARIADB_DATABASE=db_test
      - MARIADB_USER=test_user
      - MARIADB_PASSWORD=password
      - MARIADB_ROOT_HOST='%'
volumes:
  application:
    driver: local
  mariadb_data:
    driver: local
This happens when we write our own YAML file for Docker; you need to indent sub-entries two spaces under the image details:
version: '1'
services:
  mariadb-ikg:
    image: bitnami/mariadb:10.3
    ports:
      - 3306:3306
    volumes:
      - D:/docker/bitnami-mariadb/databases:/bitnami/mariadb
    environment:
      - MARIADB_ROOT_PASSWORD=123456
  phpfpm-ikg:
    image: wyveo/nginx-php-fpm:php80
    ports:
      - 80:80
    volumes:
      - D:/docker/wyveo-nginx-php-fpm/wordpress:/usr/share/nginx/html
    depends_on:
      - mariadb-ikg
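As a quick check, you can also let Compose parse and echo the file back before starting anything; it reports the same parser errors without touching any containers:

docker-compose config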

Airflow log file exception

I am using Apache Airflow for running my DAGs.
I am getting an exception:
*** Log file does not exist: /opt/airflow/logs/download2/download2/2020-07-26T15:00:00+00:00/1.log
*** Fetching from: http://fb3393f5f01e:8793/log/download2/download2/2020-07-26T15:00:00+00:00/1.log
*** Failed to fetch log file from worker. HTTPConnectionPool(host='fb3393f5f01e', port=8793): Max retries exceeded with url: /log/download2/download2/2020-07-26T15:00:00+00:00/1.log (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x7f8ba66d7b70>: Failed to establish a new connection: [Errno 111] Connection refused',))
My docker compose file for webserver, scheduler and postgres is:
version: "2.1"
services:
postgres_airflow:
image: postgres:12
environment:
- POSTGRES_USER=airflow
- POSTGRES_PASSWORD=airflow
- POSTGRES_DB=airflow
ports:
- "5432:5432"
postgres_Service:
image: postgres:12
environment:
- POSTGRES_USER=developer
- POSTGRES_PASSWORD=secret
- POSTGRES_DB=service_db
ports:
- "5433:5432"
scheduler:
image: apache/airflow
restart: always
depends_on:
- postgres_airflow
- postgres_Service
- webserver
env_file:
- .env
volumes:
- ./dags:/opt/airflow/dags
command: scheduler
healthcheck:
test: ["CMD-SHELL", "[ -f /usr/local/airflow/airflow-webserver.pid ]"]
interval: 30s
timeout: 30s
retries: 3
webserver:
image: apache/airflow
restart: always
depends_on:
- pg_airflow
- pg_metadata
- tenants-registry-api
- metadata-api
env_file:
- .env
volumes:
- ./dags:/opt/airflow/dags
- ./scripts:/opt/airflow/scripts
ports:
- "8080:8080"
entrypoint: ./scripts/airflow-entrypoint.sh
healthcheck:
test: ["CMD-SHELL", "[ -f /usr/local/airflow/airflow-webserver.pid ]"]
interval: 30s
timeout: 30s
retries: 3
I am getting this exception while using the PythonVirtualenvOperator.
My DAG file is:
from datetime import datetime
from airflow import DAG
from airflow.operators.python_operator import PythonOperator

default_args = {'owner': 'airflow',
                'start_date': datetime(2018, 1, 1)
                }

dag = DAG('download2',
          schedule_interval='0 * * * *',
          default_args=default_args,
          catchup=False)

def hello_world_py():
    return "data"

with dag:
    t1 = PythonOperator(
        task_id='download2',
        python_callable=hello_world_py,
        op_kwargs=None,
        provide_context=True,
        dag=dag
    )
env file:
AIRFLOW__CORE__SQL_ALCHEMY_CONN=postgresql://airflow:airflow@postgres_airflow:5432/airflow
AIRFLOW__CORE__FERNET_KEY=XXXX
AIRFLOW_CONN_METADATA_DB=postgres://developer:secret@postgres_Service:5432/service_db
AIRFLOW__VAR__METADATA_DB_SCHEMA=service_db
AIRFLOW__WEBSERVER__BASE_URL=http://0.0.0.0:8080/
I have also explicitly set AIRFLOW__CORE__REMOTE_LOGGING=False to disable remote logs, but I am still getting the exception.
I also tried placing everything inside the bridge network. Nothing worked for me, though the DAG itself passes.
Also tried adding:
image: apache/airflow
restart: always
depends_on:
  - scheduler
volumes:
  - ./dags:/opt/airflow/dags
env_file:
  - .env
ports:
  - 8793:8793
command: worker
That did not work for me either.
You need to expose the worker log-server port (the worker_log_server_port setting in airflow.cfg, 8793 by default) in docker-compose, like:
worker:
  image: apache/airflow
  ...
  ports:
    - 8793:8793
Here is a slightly different approach I've seen a few folks use when running the scheduler and webserver in their own containers and using LocalExecutor (which I'm guessing is the case here):
Mount a host log directory as a volume into both the scheduler and webserver containers:
volumes:
  - /location/on/host/airflow/logs:/opt/airflow/logs
Make sure the user within the airflow containers (usually airflow) has permissions to read and write that directory. If the permissions are wrong you will see an error like the one in your post.
This probably won't scale beyond LocalExecutor usage though.
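As a sketch of that permissions step, assuming the containers run as the apache/airflow image's default UID of 50000 (you can confirm with docker-compose exec webserver id):

mkdir -p /location/on/host/airflow/logs
sudo chown -R 50000:0 /location/on/host/airflow/logs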

How do I fix my docker-compose.yml? Error installing Mayan-EDMS with Django

I am trying to install the Mayan-EDMS image with the Django app and a Postgres database using docker-compose, but each time I try to bring the stack up with docker-compose up, it gives an error.
ERROR: yaml.parser.ParserError: while parsing a block mapping
  in "./docker-compose.yml", line 8, column 3
expected <block end>, but found '<block mapping start>'
  in "./docker-compose.yml", line 29, column 4
Here is my docker-compose.yml. It contains postgres:11.4-alpine, redis:5.0-alpine and mayanedms/mayanedms:3.
version: "3"
networks:
bridge:
driver: bridge
services:
app:
container_name: django
restart: always
build:
context: .
ports:
- "8000:8000"
volumes:
- ./app:/app
environment:
- DB_NAME=app
- DB_USER=insights
- DB_HOST=db
- DB_PORT=5432
depends_on:
- db
command: >
sh -c "mkdir -p logs media &&
python manage.py wait_for_db &&
python manage.py runserver 0.0.0.0:8000"
db:
image: postgres:11.4-alpine
container_name: postgres
volumes:
- postgres_data:/var/lib/postgresql/data/
environment:
- POSTGRES_USER=insights
- POSTGRES_DB=app
redis:
command:
- redis-server
- --appendonly
- "no"
- --databases
- "2"
- --maxmemory
- "100mb"
- --maxclients
- "500"
- --maxmemory-policy
- "allkeys-lru"
- --save
- ""
- --tcp-backlog
- "256"
- --requirepass
- "${MAYAN_REDIS_PASSWORD:-mayanredispassword}"
image: redis:5.0-alpine
networks:
- bridge
restart: unless-stopped
volumes:
- redis_data:/data
mayanedms:
image: mayanedms/mayanedms:3
container_name: mayanedms
restart: unless-stopped
ports:
- "80:8000"
depends_on:
- db
- redis
volumes:
- mayanedms_data:/var/lib/mayan
environment: &mayan_env
MAYAN_CELERY_BROKER_URL: redis://:${MAYAN_REDIS_PASSWORD:-mayanredispassword}#redis:6379/0
MAYAN_CELERY_RESULT_BACKEND: redis://:${MAYAN_REDIS_PASSWORD:-mayanredispassword}#redis:6379/1
MAYAN_DATABASES: "{'default':{'ENGINE':'django.db.backends.postgresql','NAME':'${MAYAN_DATABASE_DB:-mayan}','PASSWORD':'${MAYAN_DATABASE_PASSWORD:-mayandbpass}','USER':'${MAYAN_DATABASE_USER:-mayan}','HOST':'postgresql'}}"
MAYAN_DOCKER_WAIT: "db:5432 redis:6379"
networks:
- bridge
background_tasks:
restart: always
container_name: process_tasks
build:
context: .
depends_on:
- app
- db
environment:
- DB_NAME=app
- DB_USER=insights
- DB_HOST=db
- DB_PORT=5432
volumes:
- ./app:/app
command: >
sh -c "python manage.py process_tasks --sleep=3 --log-std --traceback"
volumes:
postgres_data:
redis_data:
mayanedms_data:
Thank you for the help.
The YAML indentation in your docker-compose.yml is wrong. YAML files rely on space indentation to define structure, but the indentation for service db uses 3 spaces where app uses 2. When parsing your file, Compose interprets db (3 spaces) as a sub-component of app (2 spaces); it's as if you were doing:
services:
  app:
    ...
    db:
      ...
Or an equivalent in json:
"services": {
  "app": {
    "db": {
      ...
    }
  }
}
Where what you need is:
services:
  app:
    ...
  db:
    ...
Equivalent in json:
"services": {
  "app": {
    ...
  },
  "db": {
    ...
  }
}
The same issue applies to all the other service definitions, and to volumes: volumes must be a top-level element, but with a leading space it is read as a sub-component of services.
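Applied to the file in the question, a consistently indented skeleton would look like this (service bodies elided):

version: "3"
networks:
  bridge:
    driver: bridge
services:
  app:
    ...
  db:
    ...
  redis:
    ...
  mayanedms:
    ...
  background_tasks:
    ...
volumes:
  postgres_data:
  redis_data:
  mayanedms_data: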