Airflow task failing intermittently when using SnowflakeHook - amazon-web-services

I'm using AWS managed Airflow (MWAA) to orchestrate some DAGs, but some of them are failing intermittently in tasks that use the SnowflakeHook provider.
Here is the DAG code:
# Standard Library Imports
import logging
from datetime import timedelta, datetime
# 3rd Party Imports
from airflow.models import DAG
from airflow.operators.python_operator import PythonOperator
# Internal Imports
from DagTasksImpl import raw_ingest, preprocess, stage, get_parameter, send_failed_email
from operators.CustomS3ToSnowflakeOperator import CustomS3ToSnowflakeOperator
from operators.PostprocessingOperator import PostprocessingOperator
from airflow.contrib.operators.sns_publish_operator import SnsPublishOperator
LOGGER = logging.getLogger(__name__)
EXECUTION_DATE = '{{ macros.ds_format(macros.ds_add(yesterday_ds, 7), "%Y-%m-%d", "%Y_%m_%d") }}'
SOURCE_BUCKET = "{{params.get_parameter('/durable/buckets/ce_signal_channel')}}"
RAW_INGEST_BUCKET = "{{params.get_parameter('/durable/buckets/raw')}}"
PROCESSED_BUCKET = "{{params.get_parameter('/durable/buckets/processed')}}"
STAGED_BUCKET = "{{params.get_parameter('/durable/buckets/staged')}}"
DAY_SYM_BRAND_CHANNEL_EMAX_KEY = f"{EXECUTION_DATE}/day_sym_brand_channel_emax_{EXECUTION_DATE}.csv"
DAY_SYM_CHANNEL_EMAX_KEY = f"{EXECUTION_DATE}/day_sym_channel_emax_{EXECUTION_DATE}.csv"
PERIOD_SYM_BRAND_CHANNEL_EMAX_KEY = f"{EXECUTION_DATE}/period_sym_brand_channel_emax_{EXECUTION_DATE}.csv"
PERIOD_SYM_CHANNEL_EMAX_KEY = f"{EXECUTION_DATE}/period_sym_channel_emax_{EXECUTION_DATE}.csv"
SIGNAL_CHANNEL_FILES = [DAY_SYM_BRAND_CHANNEL_EMAX_KEY, DAY_SYM_CHANNEL_EMAX_KEY,
                        PERIOD_SYM_BRAND_CHANNEL_EMAX_KEY, PERIOD_SYM_CHANNEL_EMAX_KEY]
IS_GZIP = False
DEFAULT_ARGS = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': datetime(2021, 5, 16),
    'on_failure_callback': send_failed_email,
    'retries': 1,
    'retry_delay': timedelta(minutes=5),
}
def raw_ingest_task_callable(source_bucket, target_bucket, keys, **kwargs):
    for key in keys:
        LOGGER.info(f"Preparing to ingest data from source: {source_bucket}/{key} to target: {target_bucket}/{key}")
        raw_ingest(source_bucket, target_bucket, key, IS_GZIP)

TASK_RAW_INGEST = PythonOperator(
    task_id="raw_ingest",
    provide_context=True,
    python_callable=raw_ingest_task_callable,
    op_kwargs={"source_bucket": SOURCE_BUCKET, "target_bucket": RAW_INGEST_BUCKET, "keys": SIGNAL_CHANNEL_FILES},
    params={"get_parameter": get_parameter},
    dag=CE_TRANSACT_WEEKLY_SIGNAL_CHANNEL_DAG,
)
def preprocess_task_callable(source_bucket, target_bucket, keys, **kwargs):
    for key in keys:
        LOGGER.info(f"Preparing to ingest data from source: {source_bucket}/{key} to target: {target_bucket}/{key}")
        preprocess(source_bucket, target_bucket, key, IS_GZIP)

TASK_PREPROCESS = PythonOperator(
    task_id="preprocess",
    provide_context=True,
    python_callable=preprocess_task_callable,
    op_kwargs={"source_bucket": RAW_INGEST_BUCKET, "target_bucket": PROCESSED_BUCKET, "keys": SIGNAL_CHANNEL_FILES},
    params={"get_parameter": get_parameter},
    dag=CE_TRANSACT_WEEKLY_SIGNAL_CHANNEL_DAG,
)
def stage_task_callable(source_bucket, target_bucket, keys, **kwargs):
    for key in keys:
        LOGGER.info(f"Preparing to ingest data from source: {source_bucket}/{key} to target: {target_bucket}/{key}")
        stage(source_bucket, target_bucket, key, IS_GZIP)

TASK_STAGE = PythonOperator(
    task_id="stage",
    provide_context=True,
    python_callable=stage_task_callable,
    op_kwargs={"source_bucket": PROCESSED_BUCKET, "target_bucket": STAGED_BUCKET, "keys": SIGNAL_CHANNEL_FILES},
    params={"get_parameter": get_parameter},
    dag=CE_TRANSACT_WEEKLY_SIGNAL_CHANNEL_DAG,
)
TASK_COPY_INTO_TABLE_DAY_SYM_BRAND_CHANNEL_EMAX = CustomS3ToSnowflakeOperator(
    task_id='copy_into_table_day_sym_brand_channel_emax',
    s3_keys=[DAY_SYM_BRAND_CHANNEL_EMAX_KEY],
    table="day_sym_brand_channel_emax",
    dag=CE_TRANSACT_WEEKLY_SIGNAL_CHANNEL_DAG,
    pool="snowflake_pool"
)

TASK_COPY_INTO_TABLE_PERIOD_SYM_CHANNEL_EMAX = CustomS3ToSnowflakeOperator(
    task_id='copy_into_table_period_sym_channel_emax',
    s3_keys=[PERIOD_SYM_CHANNEL_EMAX_KEY],
    table="period_sym_channel_emax",
    dag=CE_TRANSACT_WEEKLY_SIGNAL_CHANNEL_DAG,
    pool="snowflake_pool"
)

SEND_EMAIL = SnsPublishOperator(
    task_id='send_email',
    target_arn=get_parameter("/sns/mwaa"),
    subject='Transact Weekly Signal Channel Notification',
    message=f"Transact Weekly Signal Channel workflow has been successfully completed for the CE Data uploaded on {EXECUTION_DATE}",
    dag=CE_TRANSACT_WEEKLY_SIGNAL_CHANNEL_DAG
)
TASK_RAW_INGEST >> TASK_PREPROCESS >> TASK_STAGE >> TASK_COPY_INTO_TABLE_DAY_SYM_BRAND_CHANNEL_EMAX >> TASK_POSTPROCESS_SYM_BRAND_CHANNEL_EMAX >> SEND_EMAIL
And here is the CustomS3ToSnowflakeOperator:
from typing import Any, Optional
from airflow.models import BaseOperator
from airflow.providers.snowflake.hooks.snowflake import SnowflakeHook
from airflow.utils.decorators import apply_defaults
import boto3
class CustomS3ToSnowflakeOperator(BaseOperator):
    template_fields = ('s3_keys',)
    sum_column = "spend_amount_90D"

    @apply_defaults
    def __init__(
        self,
        *,
        s3_keys,
        table: str,
        stage: Any = 'consumer_edge_stage',
        file_format: str = "(type = csv field_delimiter = ',' skip_header = 1 field_optionally_enclosed_by = '\"')",
        schema: str = 'PUBLIC',
        gzip: Optional[bool] = False,
        autocommit: bool = True,
        snowflake_conn_id: str = 'snowflake_connection_id',
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.s3_keys = s3_keys
        self.table = table
        self.stage = stage
        self.file_format = file_format
        self.schema = schema
        self.gzip = gzip
        self.autocommit = autocommit
        self.snowflake_conn_id = snowflake_conn_id
        self.temp_table = f"{table}_tmp"
    def execute(self, context: Any) -> None:
        snowflake_hook = SnowflakeHook(snowflake_conn_id=self.snowflake_conn_id)
        self.wipe_table(snowflake_hook)
        if "day" in self.table:
            self.create_temp_table(snowflake_hook)
            self.copy_into_table(self.get_files(), snowflake_hook, self.temp_table)
        else:
            self.copy_into_table(self.get_files(), snowflake_hook, self.table)

    def wipe_table(self, snowflake_hook):
        wipe_query = f"truncate table if exists {self.table}"
        self.log.info('Wiping table...')
        snowflake_hook.run(wipe_query, self.autocommit)
        self.log.info('Truncate command complete...')

    def create_temp_table(self, snowflake_hook):
        create_temp_table_query = f"create or replace table {self.temp_table} clone {self.table}"
        self.log.info('Creating table...')
        snowflake_hook.run(create_temp_table_query, self.autocommit)
        self.log.info('Create table command complete...')
        drop_column_query = f"alter table {self.temp_table} drop column {self.sum_column}"
        snowflake_hook.run(drop_column_query, self.autocommit)
    def get_files(self):
        if self.gzip:
            bucket_and_key = str(self.s3_keys)
            bucket_and_key = bucket_and_key.replace('[', '')
            bucket_and_key = bucket_and_key.replace(']', '')
            bucket_and_key = bucket_and_key.replace("'", '')
            s3 = boto3.resource('s3')
            path_parts = bucket_and_key.replace("s3://", "").split("/")
            bucket = path_parts.pop(0)
            key = "/".join(path_parts)
            source_bucket = s3.Bucket(bucket)
            files_list = []
            for obj in source_bucket.objects.filter(Prefix=key):
                files_list.append(obj.key)
            files = str(files_list)
        else:
            files = str(self.s3_keys)
        files = files.replace('[', '(')
        files = files.replace(']', ')')
        return files
    def copy_into_table(self, files, snowflake_hook, table):
        base_sql = """
        FROM @{stage}/
        files={files}
        file_format={file_format}
        """.format(
            stage=self.stage, files=files, file_format=self.file_format
        )
        copy_query = """
        COPY INTO {schema}.{table} {base_sql}
        """.format(
            schema=self.schema, table=table, base_sql=base_sql
        )
        self.log.info('Executing COPY command...')
        snowflake_hook.run(copy_query, self.autocommit)
        self.log.info("COPY command completed")
Here are the logs from the failing tasks:
*** Reading remote log from Cloudwatch log_group: airflow-ConsumerEdgeAirflowEnvironment-Task log_stream: ce_transact_weekly_signal_channel_2/copy_into_table_day_sym_brand_channel_emax/2022-12-05T14_00_00+00_00/1.log.
[2022-12-12 14:02:13,653] {{taskinstance.py:670}} INFO - Dependencies all met for <TaskInstance: ce_transact_weekly_signal_channel_2.copy_into_table_day_sym_brand_channel_emax 2022-12-05T14:00:00+00:00 [queued]>
[2022-12-12 14:02:13,887] {{taskinstance.py:670}} INFO - Dependencies all met for <TaskInstance: ce_transact_weekly_signal_channel_2.copy_into_table_day_sym_brand_channel_emax 2022-12-05T14:00:00+00:00 [queued]>
[2022-12-12 14:02:13,929] {{taskinstance.py:880}} INFO -
--------------------------------------------------------------------------------
[2022-12-12 14:02:13,946] {{taskinstance.py:881}} INFO - Starting attempt 1 of 2
[2022-12-12 14:02:13,967] {{taskinstance.py:882}} INFO -
--------------------------------------------------------------------------------
[2022-12-12 14:02:14,052] {{taskinstance.py:901}} INFO - Executing <Task(CustomS3ToSnowflakeOperator): copy_into_table_day_sym_brand_channel_emax> on 2022-12-05T14:00:00+00:00
[2022-12-12 14:02:14,088] {{standard_task_runner.py:54}} INFO - Started process 12690 to run task
[2022-12-12 14:02:14,165] {{standard_task_runner.py:77}} INFO - Running: ['airflow', 'run', 'ce_transact_weekly_signal_channel_2', 'copy_into_table_day_sym_brand_channel_emax', '2022-12-05T14:00:00+00:00', '--job_id', '11386', '--pool', 'default_pool', '--raw', '-sd', 'DAGS_FOLDER/CeTransactWeeklySignalChannel.py', '--cfg_path', '/tmp/tmptw5qhtdm']
[2022-12-12 14:02:14,975] {{standard_task_runner.py:78}} INFO - Job 11386: Subtask copy_into_table_day_sym_brand_channel_emax
[2022-12-12 14:02:15,640] {{logging_mixin.py:112}} INFO - Running %s on host %s <TaskInstance: ce_transact_weekly_signal_channel_2.copy_into_table_day_sym_brand_channel_emax 2022-12-05T14:00:00+00:00 [running]> ip-10-192-21-170.us-east-2.compute.internal
[2022-12-12 14:02:15,943] {{CustomS3ToSnowflakeOperator.py:49}} INFO - Wiping table...
[2022-12-12 14:02:16,037] {{connection.py:280}} INFO - Snowflake Connector for Python Version: 2.8.2, Python Version: 3.7.10, Platform: Linux-4.14.294-220.533.amzn2.x86_64-x86_64-with-glibc2.2.5
[2022-12-12 14:02:16,055] {{connection.py:935}} INFO - This connection is in OCSP Fail Open Mode. TLS Certificates would be checked for validity and revocation status. Any other Certificate Revocation related exceptions or OCSP Responder failures would be disregarded in favor of connectivity.
[2022-12-12 14:02:16,073] {{connection.py:952}} INFO - Setting use_openssl_only mode to False
[2022-12-12 14:02:28,275] {{logging_mixin.py:112}} WARNING - Exception ignored in: <function BaseFileLock.__del__ at 0x7fba82bbf5f0>
[2022-12-12 14:02:28,467] {{logging_mixin.py:112}} WARNING - Traceback (most recent call last):
[2022-12-12 14:02:28,527] {{logging_mixin.py:112}} WARNING - File "/usr/local/airflow/.local/lib/python3.7/site-packages/filelock/_api.py", line 240, in __del__
[2022-12-12 14:02:28,548] {{logging_mixin.py:112}} WARNING - self.release(force=True)
[2022-12-12 14:02:28,571] {{logging_mixin.py:112}} WARNING - File "/usr/local/airflow/.local/lib/python3.7/site-packages/filelock/_api.py", line 201, in release
[2022-12-12 14:02:28,628] {{logging_mixin.py:112}} WARNING - with self._thread_lock:
[2022-12-12 14:02:28,646] {{logging_mixin.py:112}} WARNING - AttributeError: 'UnixFileLock' object has no attribute '_thread_lock'
Has anyone faced a similar error?
Usually one or two retries of the failed tasks make them succeed. Because of this intermittent behavior and the BaseFileLock mention in the logs, I thought it was related to some concurrency problem, but even eliminating all concurrency between tasks on the Airflow instance (by limiting the pool slots to 1) didn't solve the problem.
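For reference, here is a minimal isolation sketch (my own addition, not part of the original DAG; it reuses the connection id, pool, and DAG object from the code above) that runs comparable statements through SnowflakeHook from a bare PythonOperator, to check whether the intermittent failure also shows up outside the custom operator:
from airflow.operators.python_operator import PythonOperator
from airflow.providers.snowflake.hooks.snowflake import SnowflakeHook

def snowflake_smoke_test_callable(**kwargs):
    # Same connection id as CustomS3ToSnowflakeOperator's default.
    hook = SnowflakeHook(snowflake_conn_id="snowflake_connection_id")
    # A cheap statement first, to see whether merely connecting is flaky.
    hook.run("select current_version()", autocommit=True)
    # Then the same kind of statement the failing task starts with.
    hook.run("truncate table if exists day_sym_brand_channel_emax", autocommit=True)

TASK_SNOWFLAKE_SMOKE_TEST = PythonOperator(
    task_id="snowflake_smoke_test",
    python_callable=snowflake_smoke_test_callable,
    provide_context=True,
    pool="snowflake_pool",
    dag=CE_TRANSACT_WEEKLY_SIGNAL_CHANNEL_DAG,
)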

Related

Airflow DataprocSubmitJobOperator - how to pass data between tasks using XCOMS or other alternate ways

I'm using DataprocSubmitJobOperator in Airflow and am trying to pass data between tasks.
Here is the dag:
with models.DAG(
    'Versa-kafka2mongo',
    # Continue to run DAG twice per day
    default_args=default_dag_args,
    # schedule_interval='*/10 * * * *',
    # schedule_interval='30 11 * * *',
    schedule_interval=None,
    catchup=False,
) as dag:
    # create_dataproc_cluster
    create_dataproc_cluster = DataprocCreateClusterOperator(
        task_id="create_dataproc_cluster",
        cluster_name=CLUSTER_NAME,
        region=REGION,
        cluster_config=CLUSTER_GENERATOR_CONFIG
    )
    run_dataproc_spark_getcutomers = DataprocSubmitJobOperator(
        task_id="run_dataproc_spark_getcutomers",
        job=PYSPARK_JOB_GETCUSTOMERS,
        location=REGION,
        project_id=PROJECT_ID,
    )
    # alarmBlock
    run_dataproc_spark_alarmblock = DataprocSubmitJobOperator(
        task_id="run_dataproc_spark_alarmblock",
        job=PYSPARK_JOB_ALARMBLOCK,
        location=REGION,
        project_id=PROJECT_ID,
    )
    # insights
    run_dataproc_spark_insights = DataprocSubmitJobOperator(
        task_id="run_dataproc_spark_insights",
        job=PYSPARK_JOB_INSIGHTS,
        location=REGION,
        project_id=PROJECT_ID,
    )
    # few other tasks
    delete_dataproc_cluster = DataprocDeleteClusterOperator(
        task_id="delete_dataproc_cluster",
        project_id=PROJECT_ID,
        cluster_name=CLUSTER_NAME,
        region=REGION,
        # trigger_rule="all_done"
        trigger_rule=trigger_rule.TriggerRule.ALL_DONE
    )
    create_dataproc_cluster >> run_dataproc_spark_getcutomers >> [run_dataproc_spark_insights, run_dataproc_spark_alarmblock] >> delete_dataproc_cluster
run_dataproc_spark_getcutomers uses PySpark code to get data from Mongo, which is then used by the subsequent DataprocSubmitJobOperator tasks, i.e. run_dataproc_spark_insights and run_dataproc_spark_alarmblock (and a few other tasks not listed).
The objective is to pass the customer list to the tasks run_dataproc_spark_insights and run_dataproc_spark_alarmblock.
How do I do that?
I was trying to use XComs, but it is giving me an error - please see the code below.
Here is the code for the task run_dataproc_spark_getcutomers, which corresponds to job=PYSPARK_JOB_GETCUSTOMERS and is submitted via DataprocSubmitJobOperator, as shown in the DAG:
# read from kafka, put into mongo
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, from_json
from pyspark.sql.types import StructType, StructField, StringType, TimestampType, LongType, IntegerType
import configparser, sys

parser = configparser.ConfigParser()
print(" parser ", parser)
read_params = parser.read('params.cfg')
print(f" after reading params.cfg, {read_params} ")
mongoConnUri = parser.get('mongo', 'mongoConnUri') + "?retryWrites=true&w=majority"

def main(**kwargs):
    if kwargs is None:
        kwargs = sys.argv
    spark = SparkSession.builder.appName('kafka2mongo').getOrCreate()
    # spark.newSession
    cust_db = parser.get('mongo', 'customer_db')
    customer_collection = parser.get('mongo', 'customer_collection')
    print(f" cust_db : {cust_db}, customer_collection : {customer_collection}")
    dfm = spark.read \
        .format('mongo') \
        .option('database', cust_db) \
        .option('collection', customer_collection) \
        .option('uri', mongoConnUri) \
        .load()
    dfm.show(5, False)
    if dfm and dfm.count() > 0:
        dfm = dfm.select('customerName')
        custList = dfm.rdd.map(lambda x: x[0]).collect()
        kwargs['ti'].xcom_push(key='customers', value=custList)

if __name__ == "__main__":
    sys.exit(main())
Error :
Traceback (most recent call last):
File "/tmp/d6f5c321-fd5c-4287-9a6d-f2a05db40596/getcustomers.py", line 44, in <module>
sys.exit(main())
File "/tmp/d6f5c321-fd5c-4287-9a6d-f2a05db40596/getcustomers.py", line 41, in main
kwargs['ti'].xcom_push(key='customers',value=custList)
KeyError: 'ti'
Any inputs on how to debug/fix this?
TIA!
You can save the output of dataproc in google cloud storage and retrieve it from there:
https://cloud.google.com/dataproc/docs/guides/dataproc-job-output#cloud-storage
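If it helps, here is a rough sketch of that approach (my own illustration, not taken from the linked docs; the bucket name and object path are hypothetical, and it assumes the google-cloud-storage client is available on the Dataproc cluster): the getcustomers job writes the customer list to GCS, and the downstream insights/alarmblock jobs read it back instead of pulling it from XCom.
import json

from google.cloud import storage

BUCKET = "my-dataproc-state-bucket"   # hypothetical bucket name
BLOB = "state/customers.json"         # hypothetical object path

def save_customers(cust_list):
    # Called at the end of the getcustomers PySpark job instead of xcom_push.
    blob = storage.Client().bucket(BUCKET).blob(BLOB)
    blob.upload_from_string(json.dumps(cust_list), content_type="application/json")

def load_customers():
    # Called at the start of the insights/alarmblock jobs.
    blob = storage.Client().bucket(BUCKET).blob(BLOB)
    return json.loads(blob.download_as_bytes())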

How to deploy a deep learning model (computer vision) in AWS using Lambda

I have trained a background removal model on my custom images, and I am new to deploying computer vision models. Please, can anyone share a blog on how to deploy a PyTorch deep learning model (computer vision) using Lambda?
I have written some Lambda functions to take an input image, predict the segmentation, and give the background-removed image as output.
I am not sure whether this function is correct or not; please check this function as well.
Define imports
try:
    import unzip_requirements
except ImportError:
    pass
import json
from io import BytesIO
import time
import os
import base64
import boto3
import numpy as np
from skimage import io
import matplotlib.pyplot as plt
from preprocessing import RescaleT, ToTensorLab
import torch
import numpy as np
from PIL import Image
from network.u2net import U2NET
# Define two functions inside handler.py: img_to_base64_str to
# convert binary images to base64 format and load_models to
# load the four pretrained model inside a dictionary and then
# keep them in memory
def img_to_base64_str(img):
    buffered = BytesIO()
    img.save(buffered, format="PNG")
    buffered.seek(0)
    img_byte = buffered.getvalue()
    img_str = "data:image/png;base64," + base64.b64encode(img_byte).decode()
    return img_str

def load_models(s3, bucket):
    model = U2NET(3, 1)
    response = s3.get_object(
        Bucket=bucket, Key=f"models/u2net/u2net.pth")
    state = torch.load(BytesIO(response["Body"].read()), map_location=torch.device('cpu'))
    model.load_state_dict(state)
    model.eval()
    return model
def preprocess_raw_img(raw_img_array):
    """
    This function preprocesses a raw input array in a way such that it can be fed into the U-2-Net architecture
    :param raw_img_array:
    :return:
    """
    rescaler = RescaleT(320)
    rescaled_img = rescaler(raw_img_array)
    tensor_converter = ToTensorLab(flag=0)
    tensor_img = tensor_converter(rescaled_img)
    tensor_img = tensor_img.unsqueeze(0)
    return tensor_img

def normPRED(d):
    ma = torch.max(d)
    mi = torch.min(d)
    dn = (d - mi) / (ma - mi)
    return dn

def resize_img_to_orig(prediction_np, orig_img):
    image = Image.fromarray(prediction_np * 255).convert('RGB')
    image_original = image.resize((orig_img.shape[1], orig_img.shape[0]), resample=Image.BILINEAR)
    return image_original

def mask_to_orig_size(orig_img, rescale, threshold):
    mask_orig_size = np.array(orig_img, dtype=np.float64)
    mask_orig_size /= rescale
    mask_orig_size[mask_orig_size > threshold] = 1
    mask_orig_size[mask_orig_size <= threshold] = 0
    return mask_orig_size

def extract_foreground(mask_orig_size):
    shape = mask_orig_size.shape
    a_layer_init = np.ones(shape=(shape[0], shape[1], 1))
    mul_layer = np.expand_dims(mask_orig_size[:, :, 0], axis=2)
    a_layer = mul_layer * a_layer_init
    rgba_out = np.append(mask_orig_size, a_layer, axis=2)
    return rgba_out

def input_to_rgba_inp(input_arr, rescale):
    input_arr = np.array(input_arr, dtype=np.float64)
    shape = input_arr.shape
    input_arr /= rescale
    a_layer = np.ones(shape=(shape[0], shape[1], 1))
    rgba_inp = np.append(input_arr, a_layer, axis=2)
    return rgba_inp
def u2net_api_call(raw_img_array, model):
    """
    This function takes as input an image array of any size. The goal is to return only the object in the foreground of
    the image.
    Therefore, the raw input image is preprocessed, fed into the deep learning model. Afterwards the foreground of the
    original image is extracted from the mask which was generated by the deep learning model.
    """
    THRESHOLD = 0.9
    RESCALE = 255
    preprocessed_img = preprocess_raw_img(raw_img_array)
    d1, d2, d3, d4, d5, d6, d7 = model(preprocessed_img)
    prediction = d1[:, 0, :, :]
    prediction = normPRED(prediction)
    prediction_np = prediction.squeeze().cpu().data.numpy()
    img_orig_size = resize_img_to_orig(prediction_np, raw_img_array)
    mask_orig_size = mask_to_orig_size(img_orig_size, RESCALE, THRESHOLD)
    rgba_out = extract_foreground(mask_orig_size)
    rgba_inp = input_to_rgba_inp(raw_img_array, RESCALE)
    rem_back = (rgba_inp * rgba_out)
    return rem_back

s3 = boto3.client("s3")
bucket = "sagemaker-m-model"
model = load_models(s3, bucket)
def lambda_handler(event, Context):
    if event.get("source") in ["aws.events", "serverless-plugin-warmup"]:
        print('Lambda is warm!')
        return {}
    data = json.loads(event["body"])
    print("data keys :", data.keys())
    image = data["image"]
    image = image[image.find(",") + 1:]
    dec = base64.b64decode(image + "===")
    image = Image.open(io.BytesIO(dec))
    # image = image.convert("RGB")
    # loading the model with the selected style based on the model_id payload
    model = model
    # resize the image based on the load_size payload
    # load_size = int(data["load_size"])
    with torch.no_grad():
        background_removed = u2net_api_call(image, model)
        output_image = background_removed[0]
        # deprocess, (0, 1)
        output_image = output_image.data.cpu().float() * 0.5 + 0.5
        output_image = output_image.numpy()
        output_image = np.uint8(output_image.transpose(1, 2, 0) * 255)
        output_image = Image.fromarray(background_removed)
    # convert the PIL image to base64
    result = {
        "output": img_to_base64_str(output_image)
    }
    # send the result back to the client inside the body field
    return {
        "statusCode": 200,
        "body": json.dumps(result),
        "headers": {
            'Content-Type': 'application/json',
            'Access-Control-Allow-Origin': '*'
        }
    }
I have tried deploying with the Serverless Framework and got the errors below; I'm not sure how to solve them.
Running "serverless" from node_modules
Warning: Invalid configuration encountered
at 'custom.warmup.events': must be object
at 'custom.warmup.timeout': must be object
at 'functions.transformImage.warmup': must be object
Learn more about configuration validation here: http://slss.io/configuration-validation
Deploying br to stage dev (us-east-1)
Warning: WarmUp: Skipping warmer "events" creation. No functions to warm up.
Warning: WarmUp: Skipping warmer "timeout" creation. No functions to warm up.
✖ Stack br-dev failed to deploy (11s)
Environment: linux, node 16.14.0, framework 3.4.0 (local) 3.4.0v (global), plugin 6.1.2, SDK 4.3.1
Docs: docs.serverless.com
Support: forum.serverless.com
Bugs: github.com/serverless/serverless/issues
Error:
Error: `docker run --rm -v /home/suri/project1/rmbg/br/cache/cf58e2124c894818b4beab8df9ac26ac92eeb326c8c74fc7e60e8f08ea86df1e_x86_64_slspyc:/var/task:z -v /home/suri/project1/rmbg/br/cache/downloadCacheslspyc:/var/useDownloadCache:z lambci/lambda:build-python3.6 /bin/sh -c chown -R 0\:0 /var/useDownloadCache && python3.6 -m pip install -t /var/task/ -r /var/task/requirements.txt --cache-dir /var/useDownloadCache && chown -R 0\:0 /var/task && chown -R 0\:0 /var/useDownloadCache` Exited with code 1
at ChildProcess.<anonymous> (/home/suri/project1/rmbg/br/node_modules/child-process-ext/spawn.js:38:8)
at ChildProcess.emit (node:events:520:28)
at ChildProcess.emit (node:domain:475:12)
at maybeClose (node:internal/child_process:1092:16)
at Process.ChildProcess._handle.onexit (node:internal/child_process:302:5)
3 deprecations found: run 'serverless doctor' for more details

Apache Airflow S3ListOperator not listing files

I am trying to use the airflow.providers.amazon.aws.operators.s3_list S3ListOperator to list files in an S3 bucket in my AWS account with the DAG operator below:
list_bucket = S3ListOperator(
    task_id='list_files_in_bucket',
    bucket='<MY_BUCKET>',
    aws_conn_id='s3_default'
)
I have configured my Extra Connection details in the form of: {"aws_access_key_id": "<MY_ACCESS_KEY>", "aws_secret_access_key": "<MY_SECRET_KEY>"}
When I run my Airflow job, it appears to execute fine and my task status is Success. Here is the log output:
[2021-04-27 11:44:50,009] {base_aws.py:368} INFO - Airflow Connection: aws_conn_id=s3_default
[2021-04-27 11:44:50,013] {base_aws.py:170} INFO - Credentials retrieved from extra_config
[2021-04-27 11:44:50,013] {base_aws.py:84} INFO - Creating session with aws_access_key_id=<MY_ACCESS_KEY> region_name=None
[2021-04-27 11:44:50,027] {base_aws.py:157} INFO - role_arn is None
[2021-04-27 11:44:50,661] {taskinstance.py:1185} INFO - Marking task as SUCCESS. dag_id=two_step, task_id=list_files_in_bucket, execution_date=20210427T184422, start_date=20210427T184439, end_date=20210427T184450
[2021-04-27 11:44:50,676] {taskinstance.py:1246} INFO - 0 downstream tasks scheduled from follow-on schedule check
[2021-04-27 11:44:50,700] {local_task_job.py:146} INFO - Task exited with return code 0
Is there anything I can do to print the files in my bucket to Logs?
TIA
This code is enough, and you don't need to use the print function. Just check the corresponding log, then go to XCom; the returned list is there.
list_bucket = S3ListOperator(
    task_id='list_files_in_bucket',
    bucket='ob-air-pre',
    prefix='data/',
    delimiter='/',
    aws_conn_id='aws'
)
The result from executing S3ListOperator is an XCom object that is stored in the Airflow database after the task instance has completed.
You need to declare another operator to feed in the results from the S3ListOperator and print them out.
For example in Airflow 2.0.0 and up you can use TaskFlow:
from airflow.models import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.providers.amazon.aws.operators.s3_list import S3ListOperator
from airflow.utils import timezone

dag = DAG(
    dag_id='my-workflow',
    start_date=timezone.parse('2021-01-14 21:00')
)

@dag.task(task_id="print_objects")
def print_objects(objects):
    print(objects)

list_bucket = S3ListOperator(
    task_id='list_files_in_bucket',
    bucket='<MY_BUCKET>',
    aws_conn_id='s3_default',
    dag=dag
)

print_objects(list_bucket.output)
In older versions,
from airflow.models import DAG
from airflow.operators.python import PythonOperator
from airflow.providers.amazon.aws.operators.s3_list import S3ListOperator
from airflow.utils import timezone

dag = DAG(
    dag_id='my-workflow',
    start_date=timezone.parse('2021-01-14 21:00')
)

def print_objects(objects):
    print(objects)

list_bucket = S3ListOperator(
    dag=dag,
    task_id='list_files_in_bucket',
    bucket='<MY_BUCKET>',
    aws_conn_id='s3_default',
)

print_objects_in_bucket = PythonOperator(
    dag=dag,
    task_id='print_objects_in_bucket',
    python_callable=print_objects,
    op_args=("{{ti.xcom_pull(task_ids='list_files_in_bucket')}}",)
)

list_bucket >> print_objects_in_bucket

How to change the host value for Ansible while running it using the Ansible Python module?

Here is my code, where I am passing a value for host, but it is executing the command on localhost. I also tried passing hardcoded values, and the output shows that it ran on the given host.
Here is the code (class definition, and then):
def __init__(self):
    self.variable_manager = VariableManager()
    self.loader = DataLoader()
    self.inventory = Inventory(loader=self.loader, variable_manager=self.variable_manager, host_list="host")
    Options = namedtuple('Options', ['listtags', 'listtasks', 'listhosts', 'syntax', 'connection', 'module_path', 'forks', 'remote_user', 'private_key_file', 'ssh_common_args', 'ssh_extra_args', 'sftp_extra_args', 'scp_extra_args', 'become', 'become_method', 'become_user', 'verbosity', 'check'])
    self.options = Options(listtags=False, listtasks=False, listhosts=True, syntax=False, connection='local', module_path=None, forks=100, remote_user='ubuntu', private_key_file="/tmp/xxx-key2.pem", ssh_common_args=None, ssh_extra_args=None, sftp_extra_args=None, scp_extra_args=None, become=False, become_method=None, become_user='root', verbosity=None, check=False)

def execute_playbook(self, playbook, host, scriptname=None, command=None,
                     path=None, username=None, password=None, key=None):
    if not os.path.exists(playbook):
        print '[INFO] The playbook does not exist'
        sys.exit()
    script_path = None
    if scriptname is not None:
        script_path = os.getcwd() + '/' + scriptname
        if not os.path.exists(script_path):
            print '[INFO] The script does not exist'
            sys.exit()
    self.variable_manager.extra_vars = {'scriptname': script_path,
                                        'host': host, 'command': command, 'path': path}  # This can accommodate various other command line arguments.
    passwords = {}
    if password is not None:
        self.loader.set_vault_password(password)
    play_source = dict(
        name="Ansible Play",
        hosts=host,
        gather_facts='no',
        tasks=[
            dict(action=dict(module='shell', args='sudo mkdir /tmp/test-ansible'), register='shell_out'),
            dict(action=dict(module='debug', args=dict(msg='{{shell_out.stdout}}')))
        ]
    )
    play = Play.load(play_source, self.variable_manager, self.loader)
    tqm = TaskQueueManager(
        inventory=self.inventory,
        variable_manager=self.variable_manager,
        loader=self.loader,
        options=self.options,
        passwords=passwords,
    )
    try:
        result = tqm.run(play)
    except Exception as e:
        print e, "Exception in Ansible tqm.run()"
Output is:
PLAY [Ansible Play] *************************************************************************************************************************
TASK [command] ******************************************************************************************************************************
[WARNING]: Consider using 'become', 'become_method', and 'become_user' rather than running sudo
changed: [110.110.112.139]
TASK [debug] ********************************************************************************************************************************
ok: [110.110.112.139] => {
"msg": ""
}
But it creates the directory on my localhost, not on "110.110.112.139".
You set connection='local' as an option inside __init__.
This tells Ansible that, no matter what the target host is, tasks should execute on localhost.
Don't set it (leave the default) or use ssh to execute tasks remotely:
def __init__(self):
    self.variable_manager = VariableManager()
    self.loader = DataLoader()
    self.inventory = Inventory(loader=self.loader, variable_manager=self.variable_manager, host_list="host")
    Options = namedtuple('Options', ['listtags', 'listtasks', 'listhosts', 'syntax', 'connection', 'module_path', 'forks', 'remote_user', 'private_key_file', 'ssh_common_args', 'ssh_extra_args', 'sftp_extra_args', 'scp_extra_args', 'become', 'become_method', 'become_user', 'verbosity', 'check'])
    self.options = Options(listtags=False,
                           listtasks=False,
                           listhosts=True,
                           syntax=False,
                           connection='ssh',  # <-- changed from 'local'
                           module_path=None,
                           forks=100, remote_user='ubuntu',
                           private_key_file="/tmp/xxx-key2.pem",
                           ssh_common_args=None,
                           ssh_extra_args=None,
                           sftp_extra_args=None,
                           scp_extra_args=None,
                           become=False,
                           become_method=None,
                           become_user='root', verbosity=None,
                           check=False
                           )

Uploading video to YouTube and adding it to playlist using YouTube Data API v3 in Python

I wrote a script to upload a video to YouTube using the YouTube Data API v3 in Python, with the help of the example given in Example code.
And I wrote another script to add the uploaded video to a playlist, using the same YouTube Data API v3, which can be seen here.
After that I wrote a single script to upload a video and add that video to a playlist. In it I took care of authentication and scopes, but I am still getting a permission error. Here is my new script:
#!/usr/bin/python

import httplib
import httplib2
import os
import random
import sys
import time

from apiclient.discovery import build
from apiclient.errors import HttpError
from apiclient.http import MediaFileUpload
from oauth2client.file import Storage
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run

# Explicitly tell the underlying HTTP transport library not to retry, since
# we are handling retry logic ourselves.
httplib2.RETRIES = 1

# Maximum number of times to retry before giving up.
MAX_RETRIES = 10

# Always retry when these exceptions are raised.
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected,
                        httplib.IncompleteRead, httplib.ImproperConnectionState,
                        httplib.CannotSendRequest, httplib.CannotSendHeader,
                        httplib.ResponseNotReady, httplib.BadStatusLine)

# Always retry when an apiclient.errors.HttpError with one of these status
# codes is raised.
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]

CLIENT_SECRETS_FILE = "client_secrets.json"

# A limited OAuth 2 access scope that allows for uploading files, but not other
# types of account access.
YOUTUBE_UPLOAD_SCOPE = "https://www.googleapis.com/auth/youtube.upload"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"

# Helpful message to display if the CLIENT_SECRETS_FILE is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console
https://code.google.com/apis/console#access
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
                                   CLIENT_SECRETS_FILE))

def get_authenticated_service():
    flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=YOUTUBE_UPLOAD_SCOPE,
                                   message=MISSING_CLIENT_SECRETS_MESSAGE)
    storage = Storage("%s-oauth2.json" % sys.argv[0])
    credentials = storage.get()
    if credentials is None or credentials.invalid:
        credentials = run(flow, storage)
    return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                 http=credentials.authorize(httplib2.Http()))
def initialize_upload(title, description, keywords, privacyStatus, file):
    youtube = get_authenticated_service()
    tags = None
    if keywords:
        tags = keywords.split(",")
    insert_request = youtube.videos().insert(
        part="snippet,status",
        body=dict(
            snippet=dict(
                title=title,
                description=description,
                tags=tags,
                categoryId='26'
            ),
            status=dict(
                privacyStatus=privacyStatus
            )
        ),
        # chunksize=-1 means that the entire file will be uploaded in a single
        # HTTP request. (If the upload fails, it will still be retried where it
        # left off.) This is usually a best practice, but if you're using Python
        # older than 2.6 or if you're running on App Engine, you should set the
        # chunksize to something like 1024 * 1024 (1 megabyte).
        media_body=MediaFileUpload(file, chunksize=-1, resumable=True)
    )
    vid = resumable_upload(insert_request)
    # Here I added lines to add the video to the playlist
    # add_video_to_playlist(youtube, vid, "PL2JW1S4IMwYubm06iDKfDsmWVB-J8funQ")
    # youtube = get_authenticated_service()
    add_video_request = youtube.playlistItems().insert(
        part="snippet",
        body={
            'snippet': {
                'playlistId': "PL2JW1S4IMwYubm06iDKfDsmWVB-J8funQ",
                'resourceId': {
                    'kind': 'youtube#video',
                    'videoId': vid
                }
                # 'position': 0
            }
        }
    ).execute()
def resumable_upload(insert_request):
    response = None
    error = None
    retry = 0
    vid = None
    while response is None:
        try:
            print "Uploading file..."
            status, response = insert_request.next_chunk()
            if 'id' in response:
                print "'%s' (video id: %s) was successfully uploaded." % (
                    title, response['id'])
                vid = response['id']
            else:
                exit("The upload failed with an unexpected response: %s" % response)
        except HttpError, e:
            if e.resp.status in RETRIABLE_STATUS_CODES:
                error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status,
                                                                     e.content)
            else:
                raise
        except RETRIABLE_EXCEPTIONS, e:
            error = "A retriable error occurred: %s" % e
        if error is not None:
            print error
            retry += 1
            if retry > MAX_RETRIES:
                exit("No longer attempting to retry.")
            max_sleep = 2 ** retry
            sleep_seconds = random.random() * max_sleep
            print "Sleeping %f seconds and then retrying..." % sleep_seconds
            time.sleep(sleep_seconds)
    return vid

if __name__ == '__main__':
    title = "sample title"
    description = "sample description"
    keywords = "keyword1,keyword2,keyword3"
    privacyStatus = "public"
    file = "myfile.mp4"
    vid = initialize_upload(title, description, keywords, privacyStatus, file)
    print 'video ID is :', vid
I am not able to figure out what is wrong. I am getting a permission error, yet both scripts work fine independently.
Could anyone help me figure out where I am wrong, or how to upload a video and also add it to a playlist?
I got the answer: in the two independent scripts the scope is different.
The scope for uploading is "https://www.googleapis.com/auth/youtube.upload".
The scope for adding to a playlist is "https://www.googleapis.com/auth/youtube".
As the scopes are different, I had to handle authentication separately.
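For what it's worth, here is a minimal sketch of the alternative (my own adaptation of the script above, not part of the original answer): request both scopes in a single OAuth flow so one credential can upload the video and modify the playlist. The cached *-oauth2.json file will likely need to be deleted so consent is granted again for the new scope set.
import sys
import httplib2

from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run

CLIENT_SECRETS_FILE = "client_secrets.json"
# Both scopes up front: upload permission plus general account access for
# playlist modification.
YOUTUBE_SCOPES = [
    "https://www.googleapis.com/auth/youtube.upload",
    "https://www.googleapis.com/auth/youtube",
]

def get_authenticated_service():
    # flow_from_clientsecrets accepts an iterable of scope strings.
    flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=YOUTUBE_SCOPES)
    storage = Storage("%s-oauth2.json" % sys.argv[0])
    credentials = storage.get()
    if credentials is None or credentials.invalid:
        credentials = run(flow, storage)
    return build("youtube", "v3", http=credentials.authorize(httplib2.Http()))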