How to run Node.js WebdriverIO Appium tests on AWS Device Farm

I have a working configuration locally, running tests with Node and WebdriverIO, but I want to automate this with AWS Device Farm. My test script:
const webdriverIO = require("webdriverio");

const opts = {
  path: '/wd/hub',
  port: 4723,
  capabilities: {
    platformName: "Android",
    platformVersion: "9",
    deviceName: "emulator-5554",
    app: __dirname + "/app-debug.apk",
    appPackage: "com.ayga.cooktop",
    appActivity: ".MainActivity",
    automationName: "UiAutomator2"
  }
};

const main = async () => {
  const client = await webdriverIO.remote(opts);
  // ... actual tests
};

void main();
My build file:
version: 0.1
phases:
  install:
    commands:
      - nvm install 12.16.1
      - echo "Navigate to test package directory"
      - cd $DEVICEFARM_TEST_PACKAGE_PATH
      - npm install *.tgz
      - export APPIUM_VERSION=1.14.2
      - avm $APPIUM_VERSION
      - ln -s /usr/local/avm/versions/$APPIUM_VERSION/node_modules/.bin/appium /usr/local/avm/versions/$APPIUM_VERSION/node_modules/appium/bin/appium.js
  pre_test:
    commands:
      - echo "Start appium server"
      - >-
        appium --log-timestamp
        --default-capabilities "{\"deviceName\": \"$DEVICEFARM_DEVICE_NAME\", \"platformName\":\"$DEVICEFARM_DEVICE_PLATFORM_NAME\",
        \"app\":\"$DEVICEFARM_APP_PATH\", \"udid\":\"$DEVICEFARM_DEVICE_UDID\", \"platformVersion\":\"$DEVICEFARM_DEVICE_OS_VERSION\",
        \"chromedriverExecutable\":\"$DEVICEFARM_CHROMEDRIVER_EXECUTABLE\"}"
        >> $DEVICEFARM_LOG_DIR/appiumlog.txt 2>&1 &
      - >-
        start_appium_timeout=0;
        while [ true ];
        do
          if [ $start_appium_timeout -gt 60 ];
          then
            echo "appium server never started in 60 seconds. Exiting";
            exit 1;
          fi;
          grep -i "Appium REST http interface listener started on 0.0.0.0:4723" $DEVICEFARM_LOG_DIR/appiumlog.txt >> /dev/null 2>&1;
          if [ $? -eq 0 ];
          then
            echo "Appium REST http interface listener started on 0.0.0.0:4723";
            break;
          else
            echo "Waiting for appium server to start. Sleeping for 1 second";
            sleep 1;
            start_appium_timeout=$((start_appium_timeout+1));
          fi;
        done;
  test:
    commands:
      - echo "Navigate to test source code"
      - cd $DEVICEFARM_TEST_PACKAGE_PATH/node_modules/*
      - echo "Start Appium Node test"
      - node index.js
  post_test:
    commands:
artifacts:
  - $DEVICEFARM_LOG_DIR
My test runs, but it was not able to find the APK. How do I tell AWS about the apk, deviceName, and the other capabilities?
const opts = {
  path: '/wd/hub',
  port: 4723,
  capabilities: {
    platformName: "Android",
    platformVersion: "9",
    deviceName: "emulator-5554",
    app: __dirname + "/app-debug.apk",
    appPackage: "com.ayga.cooktop",
    appActivity: ".MainActivity",
    automationName: "UiAutomator2"
  }
};

You have to build the capabilities from the environment variables that Device Farm provides, like below.
const apkInfo = {
  appPackage: "com.ayga.cooktopbt",
  appActivity: "com.ayga.cooktop.MainActivity",
  automationName: "UiAutomator2"
};

const awsOptions = {
  path: '/wd/hub',
  port: 4723,
  capabilities: {
    ...apkInfo,
    platformName: process.env.DEVICEFARM_DEVICE_PLATFORM_NAME,
    deviceName: process.env.DEVICEFARM_DEVICE_NAME,
    app: process.env.DEVICEFARM_APP_PATH,
  }
};
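For completeness, here is a minimal sketch of how these options could be wired into index.js so the same file runs locally and on Device Farm. The fallback values are just the local ones from the question, and the session cleanup at the end is an assumption about how your tests finish:

const webdriverIO = require("webdriverio");

// Use the Device Farm-provided values when present, otherwise fall back to
// the local emulator settings so the same index.js works in both places.
const capabilities = {
  appPackage: "com.ayga.cooktopbt",
  appActivity: "com.ayga.cooktop.MainActivity",
  automationName: "UiAutomator2",
  platformName: process.env.DEVICEFARM_DEVICE_PLATFORM_NAME || "Android",
  deviceName: process.env.DEVICEFARM_DEVICE_NAME || "emulator-5554",
  app: process.env.DEVICEFARM_APP_PATH || __dirname + "/app-debug.apk",
};

const main = async () => {
  const client = await webdriverIO.remote({
    path: "/wd/hub",
    port: 4723,
    capabilities,
  });
  // ... actual tests
  await client.deleteSession(); // clean up so Device Farm can finish the run
};

void main();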

Related

Bitbucket pipeline add description in version description

I am trying to add a description to the newly created version only, but with no luck. I can add a description to the Lambda function and the alias, but not to the newly published version.
The screenshot below shows where I want the description from the pipeline to appear.
Here is my code in the YAML file:
- step:
    name: Build and publish version
    oidc: true
    script:
      - apt-get update && apt-get install -y zip jq
      - for dir in ${LAMBDA_FUNCTION_NAME}; do
      - echo $dir
      - cd ./$dir && npm install --production
      - zip -r code.zip *
      # lambda config
      # - export ENVIRONMENT="dev"
      # Create lambda configuration file with environment variables
      - export LAMBDA_CONFIG="{}"
      - echo $LAMBDA_CONFIG > ./lambdaConfig.json
      - pipe: atlassian/aws-lambda-deploy:1.5.0
        variables:
          AWS_DEFAULT_REGION: $AWS_DEFAULT_REGION
          AWS_OIDC_ROLE_ARN: '############################'
          FUNCTION_NAME: $dir
          COMMAND: 'update'
          ZIP_FILE: 'code.zip'
          FUNCTION_CONFIGURATION: "lambdaConfig.json"
          WAIT: "true"
          #PUBLISH_FLAG: "false"
      - BITBUCKET_PIPE_SHARED_STORAGE_DIR="/opt/atlassian/pipelines/agent/build/.bitbucket/pipelines/generated/pipeline/pipes"
      # - if [ ${ALIAS} == "" ]; then echo "Setup successful"; else
      - VERSION=$(jq --raw-output '.Version' $BITBUCKET_PIPE_SHARED_STORAGE_DIR/aws-lambda-deploy-env)
      - cd .. && echo ${VERSION} > ./version.txt
      - echo "Published version:${VERSION}"
      - cat version.txt
      - VERSION=$(cat ./version.txt)
      #- fi;
      - pipe: atlassian/aws-lambda-deploy:1.5.0
        variables:
          AWS_DEFAULT_REGION: $AWS_DEFAULT_REGION
          AWS_OIDC_ROLE_ARN: '########################'
          FUNCTION_NAME: $dir
          COMMAND: 'alias'
          ALIAS: ${ALIAS}
          VERSION: '${VERSION}'
          DESCRIPTION: ${DESCRIPTION}
      - done

Running same Docker image with different flags on AWS and saving results

I have an experiment I'd like to run 100 different times, each with a command line flag set to a different integer value. Each experiment will output the result to a text file. Experiments take about 2 hours each and are independent of each other.
I currently have a Docker image that can run the experiment when provided the command line flag.
I am curious if there is a way to write a script that can launch 100 AWS instances (one for each possible flag value), run the Docker image, and then output the result to a shared text file somewhere. Is this possible? I am very inexperienced with AWS so I'm not sure if this is the proper tool or what steps would be required (besides building the Docker image).
Thanks.
You could do this using Vagrant with the vagrant-aws plugin to spin up the instances, and either the Docker provisioner to pull your images and run your containers, or the Ansible provisioner. For example:
.
├── playbook.yml
└── Vagrantfile
The Vagrantfile:
# -*- mode: ruby -*-
# vi: set ft=ruby :

VAGRANTFILE_API_VERSION = "2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  N = 100
  (1..N).each do |server_id|
    config.vm.box = "dummy"
    config.ssh.forward_agent = true

    config.vm.define "server#{server_id}" do |server|
      server.vm.provider :aws do |aws, override|
        aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
        aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
        aws.instance_type = "t2.micro"
        aws.block_device_mapping = [
          {
            "DeviceName" => "/dev/sda1",
            "Ebs.VolumeSize" => 30
          }
        ]
        aws.tags = {
          "Name" => "node#{server_id}.example.com",
          "Environment" => "stage"
        }
        aws.subnet_id = "subnet-d65893b0"
        aws.security_groups = [
          "sg-deadbeef"
        ]
        aws.region = "eu-west-1"
        aws.region_config "eu-west-1" do |region|
          region.ami = "ami-0635ad49b5839867c"
          region.keypair_name = "ubuntu"
        end
        aws.monitoring = true
        aws.associate_public_ip = false
        aws.ssh_host_attribute = :private_ip_address
        override.ssh.username = "ubuntu"
        override.ssh.private_key_path = ENV["HOME"] + "/.ssh/id_rsa"
        override.ssh.forward_agent = true
      end

      if server_id == N
        server.vm.provision :ansible do |ansible|
          ansible.limit = "all"
          ansible.playbook = "playbook.yml"
          ansible.compatibility_mode = "2.0"
          ansible.raw_ssh_args = "-o ForwardAgent=yes"
          ansible.extra_vars = {
            "ansible_python_interpreter": "/usr/bin/python3"
          }
        end
      end
    end
  end
end
Note: this example uses the Ansible parallel execution pattern from Vagrant's Tips & Tricks.
The Ansible playbook.yml:
- hosts: all
  pre_tasks:
    - name: get instance facts
      local_action:
        module: ec2_instance_facts
        filters:
          private-dns-name: '{{ ansible_fqdn }}'
          "tag:Environment": stage
      register: _ec2_instance_facts

    - name: add route53 entry
      local_action:
        module: route53
        state: present
        private_zone: yes
        zone: 'example.com'
        record: '{{ _ec2_instance_facts.instances[0].tags["Name"] }}'
        type: A
        ttl: 7200
        value: '{{ _ec2_instance_facts.instances[0].private_ip_address }}'
        wait: yes
        overwrite: yes

  tasks:
    - name: install build requirements
      apt:
        name: ['python3-pip', 'python3-socks', 'git']
        state: present
        update_cache: yes
      become: true

    - name: apt install docker requirements
      apt:
        name: ['apt-transport-https', 'ca-certificates', 'curl', 'gnupg-agent', 'software-properties-common']
        state: present
      become: true

    - name: add docker apt key
      apt_key:
        url: https://download.docker.com/linux/ubuntu/gpg
        state: present
      become: true

    - name: add docker apt repository
      apt_repository:
        repo: 'deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable'
        state: present
      become: true

    - name: apt install docker-ce
      apt:
        name: ['docker-ce', 'docker-ce-cli', 'containerd.io']
        state: present
        update_cache: yes
      become: true

    - name: get docker-compose
      get_url:
        url: 'https://github.com/docker/compose/releases/download/1.24.1/docker-compose-{{ ansible_system }}-{{ ansible_userspace_architecture }}'
        dest: /usr/local/bin/docker-compose
        mode: '0755'
      become: true

    - name: pip install docker and boto3
      pip:
        name: ['boto3', 'docker', 'docker-compose']
        executable: pip3

    - name: create docker config directory
      file:
        path: /etc/docker
        state: directory
      become: true

    - name: copy docker daemon.json
      copy:
        content: |
          {
            "group": "docker",
            "log-driver": "journald",
            "live-restore": true,
            "experimental": true,
            "insecure-registries" : [],
            "features": { "buildkit": true }
          }
        dest: /etc/docker/daemon.json
      become: true

    - name: enable docker service
      service:
        name: docker
        enabled: yes
      become: true

    - name: add ubuntu user to docker group
      user:
        name: ubuntu
        groups: docker
        append: yes
      become: true

    - name: restart docker daemon
      systemd:
        state: restarted
        daemon_reload: yes
        name: docker
        no_block: yes
      become: true

    # pull your images then run your containers
The only other approach I can think of is using AWS SSM to run the commands, but you would still need to spin up 100 instances, which is not a great approach either.
Below is the set of commands you can use.
Spin up an instance using the CloudFormation template below, running it in a loop to create multiple instances:
---
Resources:
  MyInstance:
    Type: AWS::EC2::Instance
    Properties:
      AvailabilityZone: <region>
      ImageId: <amiID>
      InstanceType: t2.micro
      KeyName: <KeyName>
Use the command below to get the instance ID:
aws ec2 describe-instances --filters 'Name=tag:Name,Values=EC2' --query 'Reservations[*].Instances[*].InstanceId' --output text
Using that instance ID, run the command below:
aws ssm send-command --instance-ids "<instanceID>" --document-name "AWS-RunShellScript" --comment "<COMMENT>" --parameters commands='sudo yum update -y' --output text
I don't think Docker will be of much help here, as it would complicate things for you due to the SSM agent installation. So your best bet would be running the commands one by one and finally storing your output in S3.
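If you script this from Node.js, a rough sketch of that loop could look like the following. The instance IDs, bucket name, and experiment command are placeholders, and SSM's SendCommand can also ship the command output to S3 via OutputS3BucketName:

// Sketch: send the experiment command to each instance via SSM, one flag value
// per instance, and let SSM upload the command output to S3.
// Instance IDs, bucket name and the experiment command are placeholders.
const AWS = require("aws-sdk");
const ssm = new AWS.SSM({ region: "us-east-1" });

const instanceIds = ["i-0123456789abcdef0" /* ... one per flag value */];

const main = async () => {
  for (let flag = 0; flag < instanceIds.length; flag++) {
    await ssm.sendCommand({
      InstanceIds: [instanceIds[flag]],
      DocumentName: "AWS-RunShellScript",
      Comment: `experiment with flag ${flag}`,
      Parameters: {
        // The instance needs the AWS CLI and an instance profile allowing s3:PutObject.
        commands: [
          `./run_experiment --flag ${flag} > /tmp/result-${flag}.txt`,
          `aws s3 cp /tmp/result-${flag}.txt s3://my-results-bucket/result-${flag}.txt`,
        ],
      },
      OutputS3BucketName: "my-results-bucket",
      OutputS3KeyPrefix: "ssm-logs/",
    }).promise();
  }
};

void main();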

Access Elasticsearch running in Docker on AWS

I am running an Elasticsearch service in Docker on AWS, and I have another Docker container that runs a .NET application. This is an excerpt of the docker-compose file I am using:
version: '3.1'
services:
  someservice:
    build:
      context: ./
      dockerfile: Dockerfile
      args:
        - AWS_ACCESS_KEY_ID
        - AWS_REGION
        - AWS_SECRET_ACCESS_KEY
        - AWS_SESSION_TOKEN
    restart: always
    container_name: some-server
    ports:
      - 8080:8080
    links:
      - elastic
  elastic:
    image: docker.elastic.co/elasticsearch/elasticsearch:6.4.0
    environment:
      TAKE_FILE_OWNERSHIP: "true"
    volumes:
      - ./.data/elastic:/usr/share/elasticsearch/data
    ports:
      - 9200:9200
      - 9300:9300
    ulimits:
      nofile:
        soft: "65536"
        hard: "65536"
And this is an excerpt of the Dockerfile:
FROM microsoft/dotnet:2.2-sdk AS build-env
WORKDIR /app
RUN mkdir /app/Service/
WORKDIR /app/Service/
COPY local_folder/Service/*.csproj ./
RUN dotnet restore
# Copy everything else and build
COPY local_folder/Service/. ./
RUN dotnet publish -c Release -o out
# Build runtime image
FROM microsoft/dotnet:aspnetcore-runtime
ARG AWS_ACCESS_KEY_ID
ARG AWS_SECRET_ACCESS_KEY
ENV AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY
ENV AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID
ENV ASPNETCORE_URLS="http://0.0.0.0:8080"
EXPOSE 8080
WORKDIR /app
COPY --from=build-env /app/Service/out .
ENTRYPOINT ["dotnet", "Service.dll"]
In the Service application I am using the NEST client, something like this:
var settings = new ConnectionSettings(new Uri(ELASTIC_SERVICE_URI)).DefaultIndex("some-index").EnableDebugMode();
var client = new ElasticClient(settings);

var pingResponse = client.Ping();
if (pingResponse.IsValid)
{
    string searchTerm = "*";
    if (searchParam != null)
    {
        searchTerm = "*" + WebUtility.UrlDecode(searchParam) + "*";
    }
    int from = (int)(fromParam == null ? 0 : fromParam);
    int size = (int)(sizeParam == null ? 10 : sizeParam);

    var searchResponse = client.Search<SomeType>(s => s
        .From(from)
        .Size(size)
        .AllTypes()
        .Query(q => q
            .Bool(bq => bq
                .Must(m => m
                    .QueryString(qs => qs
                        .Query(searchTerm)
                        .AnalyzeWildcard(true)
                        .DefaultField("*")
                    )
                )
            )
        )
    );
    return Ok(searchResponse.Documents);
If I run the Docker containers locally, I can set the ELASTIC_SERVICE_URI constant to http://elastic:9200 and it works; I can even point it at the Elasticsearch service running in AWS, like https://HOST.us-east-2.es.amazonaws.com.
In both cases the service works fine and the search data is returned.
But when I run the containers on AWS, no data is retrieved; I just get an empty collection.
What can be wrong?

How to start an EventStore in a GitLab CI test

I have the following code in my GitLab repo.
package.json
{
  ...
  "scripts": {
    "test": "mocha --require ts-node/register --watch-extensions ts,tsx \"src/**/*.{spec,test}.{ts,tsx}\""
  }
  ...
}
.gitlab-ci.yml
stages:
  - test

test:
  image: node:8
  stage: test
  script:
    - npm install
    - npm run test
test.ts
import { exec } from 'child_process';
import { promisify } from 'util';

const Exec = promisify(exec);

describe(test, async () => {
  before(async () => {
    // next line doesn't work in GitLab-CI
    await Exec(`docker run -d --rm -p 1113:1113 -p 2113:2113 eventstore/eventstore`);
    // and so on
  });
});
It works well when I run "npm run test" on my local machine.
My question is: how can I run this test in GitLab CI?
If your tests need to connect to an EventStore container, you can use GitLab CI services:
GitLab CI uses the services keyword to define what Docker containers should be linked with your base image.
First, you will need to set up a Docker executor; then you will be able to use EventStore as a service. Here is an example with PostgreSQL; more information here.
Example:
test_server:
  tags:
    - docker
  services:
    - eventstore:latest
  script:
    - npm install && npm run test
Edit:
To access the service:
The default aliases for the service's hostname are created from its image name.
Or use an explicit alias:
services:
  - name: mysql:latest
    alias: mysql-1
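In the test itself you would then point at the service's hostname instead of starting Docker from within the test. Here is a rough sketch, assuming the default eventstore alias and EventStore's standard 2113 HTTP port; adjust the host, port, and health check to your setup:

// Sketch: wait for the EventStore service started by GitLab CI instead of
// running `docker run` inside the test. The hostname "eventstore" and port
// 2113 are assumptions based on the default service alias and EventStore defaults.
const http = require('http');

const waitForEventstore = (host = 'eventstore', port = 2113, retries = 30) =>
  new Promise((resolve, reject) => {
    const attempt = (left) => {
      const req = http.get({ host, port, path: '/' }, (res) => {
        res.resume();                 // drain the response body
        resolve(res.statusCode);      // reachable: the service is up
      });
      req.on('error', () => {
        if (left <= 0) return reject(new Error('eventstore never became reachable'));
        setTimeout(() => attempt(left - 1), 1000);
      });
    };
    attempt(retries);
  });

before(async () => {
  await waitForEventstore();          // replaces the `docker run` call on GitLab CI
});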

How to run my jenkins pipeline code in AWS CodeBuild?

I can trigger my AWS pipeline from Jenkins, but I don't want to create a buildspec.yaml; instead I want to use the pipeline script that already works for Jenkins.
In order to use CodeBuild, you need to provide the CodeBuild project with a buildspec.yaml file along with your source code, or incorporate the commands into the actual project.
However, I think you are interested in having the creation of the buildspec.yaml file done within the Jenkins pipeline.
Below is a snippet of a stage within a Jenkinsfile; it creates a buildspec file for building Docker images and then sends the contents of the workspace to a CodeBuild project. This uses the CodeBuild plugin for Jenkins.
stage('Build - Non Prod'){
    String nonProductionBuildSpec = """
    version: 0.1
    phases:
      pre_build:
        commands:
          - \$(aws ecr get-login --registry-ids <number> --region us-east-1)
      build:
        commands:
          - docker build -t ces-sample-docker .
          - docker tag $NAME:$TAG <account-number>.dkr.ecr.us-east-1.amazonaws.com/$NAME:$TAG
      post_build:
        commands:
          - docker push <account-number>.dkr.ecr.us-east-1.amazonaws.com/$NAME:$TAG
    """.replace("\t"," ")

    writeFile file: 'buildspec.yml', text: nonProductionBuildSpec

    // Send checked out files to AWS
    awsCodeBuild projectName: "<codebuild-projectname>", region: "us-east-1", sourceControlType: "jenkins"
}
I hope this gives you an idea of what's possible.
Good luck!
Patrick
You will need to write a buildspec for the commands that you want AWS CodeBuild to run. If you use the CodeBuild plugin for Jenkins, you can add that to your Jenkins pipeline and use CodeBuild as a Jenkins build slave to execute the commands in your buildspec.
See more details here: https://docs.aws.amazon.com/codebuild/latest/userguide/jenkins-plugin.html
@hynespm - excellent example, mate.
Here is another one based on yours, but with stripIndent() and withAWS to switch roles:
#!/usr/bin/env groovy

def cbResult = null

pipeline {
    .
    .
    .
    script {
        echo ("app_version TestwithAWS value : " + "${app_version}")

        String buildspec = """\
            version: 0.2
            env:
              parameter-store:
                TOKEN: /some/token
            phases:
              pre_build:
                commands:
                  - echo "List files...."
                  - ls -l
                  - echo "TOKEN is ':' \${TOKEN}"
              build:
                commands:
                  - echo "build':' Do something here..."
                  - echo "\${CODEBUILD_SRC_DIR}"
                  - ls -l "\${CODEBUILD_SRC_DIR}"
              post_build:
                commands:
                  - pwd
                  - echo "postbuild':' Done..."
        """.stripIndent()

        withAWS(region: 'ap-southeast-2', role: 'CodeBuildWithJenkinsRole', roleAccount: '123456789123', externalId: '123456-2c1a-4367-aa09-7654321') {
            sh 'aws ssm get-parameter --name "/some/token"'
            try {
                cbResult = awsCodeBuild projectName: 'project-lambda',
                    sourceControlType: 'project',
                    credentialsType: 'keys',
                    awsAccessKey: env.AWS_ACCESS_KEY_ID,
                    awsSecretKey: env.AWS_SECRET_ACCESS_KEY,
                    awsSessionToken: env.AWS_SESSION_TOKEN,
                    region: 'ap-southeast-2',
                    envVariables: '[ { GITHUB_OWNER, special }, { GITHUB_REPO, project-lambda } ]',
                    artifactTypeOverride: 'S3',
                    artifactLocationOverride: 'special-artifacts',
                    overrideArtifactName: 'True',
                    buildSpecFile: buildspec
            } catch (Exception cbEx) {
                cbResult = cbEx.getCodeBuildResult()
            }
        }
    } //script
    .
    .
    .
}