I use AWS Glue and Apache Hudi to replicate data from RDS to S3. If I execute the following job, two Parquet files (the initial one and the updated one) are generated in the S3 bucket (basePath). I want to keep only the latest file and delete the old one.
Does anyone know how to keep only the latest file in the bucket?
import sys
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from pyspark.sql.session import SparkSession
from awsglue.context import GlueContext
from awsglue.job import Job
## #params: [JOB_NAME]
args = getResolvedOptions(sys.argv, ['JOB_NAME'])
spark = SparkSession.builder.config('spark.serializer','org.apache.spark.serializer.KryoSerializer').getOrCreate()
sc = spark.sparkContext
glueContext = GlueContext(sc)
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
dataGen = sc._jvm.org.apache.hudi.QuickstartUtils.DataGenerator()
inserts = sc._jvm.org.apache.hudi.QuickstartUtils.convertToStringList(dataGen.generateInserts(5))
df = spark.read.json(spark.sparkContext.parallelize(inserts, 2))
df.show()
tableName = 'hudi_mor_athena_sample'
bucketName = 'cm-sato-hudi-sample--datalake'
basePath = f's3://{bucketName}/{tableName}'
hudi_options = {
    'hoodie.table.name': tableName,
    'hoodie.datasource.write.storage.type': 'MERGE_ON_READ',
    'hoodie.compact.inline': 'false',
    'hoodie.datasource.write.recordkey.field': 'uuid',
    'hoodie.datasource.write.partitionpath.field': 'partitionpath',
    'hoodie.datasource.write.table.name': tableName,
    'hoodie.datasource.write.operation': 'upsert',
    'hoodie.datasource.write.precombine.field': 'ts',
    'hoodie.upsert.shuffle.parallelism': 2,
    'hoodie.insert.shuffle.parallelism': 2,
}
df.write.format("hudi"). \
    options(**hudi_options). \
    mode("overwrite"). \
    save(basePath)
updates = sc._jvm.org.apache.hudi.QuickstartUtils.convertToStringList(dataGen.generateUpdates(3))
df = spark.read.json(spark.sparkContext.parallelize(updates, 2))
df.show()
# update
df.write.format("hudi"). \
    options(**hudi_options). \
    mode("append"). \
    save(basePath)
job.commit()
Instead of mode("append"), use mode("overwrite") for the second write.
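Applied to the update step in the question, that change would look like this (a minimal sketch; as far as I know, overwrite mode recreates the Hudi table at basePath, so the older data files are not kept alongside the new ones):
# Write the updates in overwrite mode so the table at basePath is recreated
# instead of a new file version being added next to the old one.
df.write.format("hudi") \
    .options(**hudi_options) \
    .mode("overwrite") \
    .save(basePath)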
As I understand it, Job Bookmarks prevent duplicated data: "enable" processes only the data added since the previous run, and "disable" processes the entire dataset. (Does that mean it overwrites it? I tried this, but the job took far too long and I'm not sure it does what I think it does.)
But what if I want to overwrite the DynamoDB table in the job? I've seen examples where the output data is in S3, but I'm not sure about DynamoDB.
For example I have a Glue job like this:
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
args = getResolvedOptions(sys.argv, ["JOB_NAME"])
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args["JOB_NAME"], args)
# Script generated for node Redshift Cluster
RedshiftCluster_node1 = glueContext.create_dynamic_frame.from_catalog(
    database="tr_bbd",
    redshift_tmp_dir=args["TempDir"],
    table_name="tr_bbd_vendor_info",
    transformation_ctx="RedshiftCluster_node1",
)
# Script generated for node ApplyMapping
ApplyMapping_node2 = ApplyMapping.apply(
    frame=RedshiftCluster_node1,
    mappings=[
        ("vendor_code", "string", "vendor_code", "string"),
        ("vendor_group_id", "int", "vendor_group_id", "int"),
        ("vendor_group_status_name", "string", "vendor_group_status_name", "string")
    ],
    transformation_ctx="ApplyMapping_node2",
)
# Script generated for node DynamoDB bucket
Datasink1 = glueContext.write_dynamic_frame_from_options(
    frame=ApplyMapping_node2,
    connection_type="dynamodb",
    connection_options={
        "dynamodb.output.tableName": "VENDOR_TABLE",
        "dynamodb.throughput.write.percent": "1.0"
    }
)
job.commit()
Thank you.
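One possible approach, not from the original post and only a sketch: as far as I know the Glue DynamoDB sink has no overwrite mode, so the table has to be cleared before the write, for example by deleting and recreating it with boto3 at the start of the job. The key schema below is an assumption based on the mapped columns; adjust it to the real table definition.
import boto3

dynamodb = boto3.client("dynamodb")

# Drop and recreate the target table so the subsequent Glue write effectively
# replaces its contents. "vendor_code" as the hash key is an assumption.
dynamodb.delete_table(TableName="VENDOR_TABLE")
dynamodb.get_waiter("table_not_exists").wait(TableName="VENDOR_TABLE")

dynamodb.create_table(
    TableName="VENDOR_TABLE",
    KeySchema=[{"AttributeName": "vendor_code", "KeyType": "HASH"}],
    AttributeDefinitions=[{"AttributeName": "vendor_code", "AttributeType": "S"}],
    BillingMode="PAY_PER_REQUEST",
)
dynamodb.get_waiter("table_exists").wait(TableName="VENDOR_TABLE")

# ...then run the write_dynamic_frame_from_options call from the job as before.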
I have a Glue script which is trying to read the RDS credentials I have stored in Secrets Manager, but the script keeps running and never completes.
Also, the IAM role that this Glue script runs with has the SecretsManagerReadWrite policy (AWS managed) attached.
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
from awsglue.dynamicframe import DynamicFrameCollection
from awsglue.dynamicframe import DynamicFrame
import boto3
import botocore
from botocore.errorfactory import ClientError
import json  # needed below for json.loads() on the secret string
# import org.apache.spark.sql.functions.concat_ws
from pyspark.sql.types import *
from pyspark.sql.functions import udf
from datetime import date
today = date.today()
current_day = today.strftime("%Y%m%d")
def str_to_arr(my_list):
    str = ""
    for item in my_list:
        if item:
            str += item
    str = str.split(" ")
    return '{"' + ' '.join([elem for elem in str]) + '"}'

str_to_arr_udf = udf(str_to_arr, StringType())

def AddPartitionKeys(glueContext, dfc) -> DynamicFrameCollection:
    df = dfc.select(list(dfc.keys())[0]).toDF()
    df = glueContext.add_ingestion_time_columns(df, "day")
    glue_df = DynamicFrame.fromDF(df, glueContext, "transform_date")
    return(DynamicFrameCollection({"CustomTransform0": glue_df}, glueContext))
## #params: [JOB_NAME]
args = getResolvedOptions(sys.argv, ['JOB_NAME', 'days', 's3_bucket', 'rds_endpoint', 'region_name', 'secret_name'])
region_name = args['region_name']
session = boto3.session.Session()
client = session.client("secretsmanager", region_name=region_name)
get_secret_value_response = client.get_secret_value(SecretId=args['secret_name'])
secret = get_secret_value_response['SecretString']
secret = json.loads(secret)
db_username = secret.get('username')
db_password = secret.get('password')
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
print("Below are the creds")
# print("DB USERNAME IS " , db_username)
# print("DB PWD IS " , db_password)
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
job.commit()
What am I missing here?
I checked my work against this blog, and yet I am not able to get this script to complete successfully.
After Mark's suggestion, I was able to figure out that I had to create a VPC interface endpoint for Secrets Manager. The steps are outlined here by AWS; I just had to make sure the endpoint policy allowed access to (listed the ARNs of) the resources I want to read from Secrets Manager.
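For reference, a minimal sketch of creating that interface endpoint with boto3 (the region, VPC, subnet, and security group IDs are placeholders; the same can be done in the console or CLI as in the AWS steps):
import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")  # placeholder region

# Interface endpoint so a Glue job running inside the VPC can reach
# Secrets Manager without a route to the public endpoint. IDs are hypothetical.
ec2.create_vpc_endpoint(
    VpcEndpointType="Interface",
    VpcId="vpc-0123456789abcdef0",
    ServiceName="com.amazonaws.us-east-1.secretsmanager",
    SubnetIds=["subnet-0123456789abcdef0"],
    SecurityGroupIds=["sg-0123456789abcdef0"],
    PrivateDnsEnabled=True,
)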
I am new to AWS Glue. As per the AWS Glue documentation, the Spigot transform writes sample records from a DynamicFrame to an S3 directory. But when I run this, it does not create any file under that S3 directory. Any input on where I am going wrong? Below is the test code.
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
## #params: [JOB_NAME]
args = getResolvedOptions(sys.argv, ['JOB_NAME'])
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
datasource0 = glueContext.create_dynamic_frame.from_catalog(database = "amssurveydb", table_name = "amssurvey", transformation_ctx = "datasource0")
split1 = SplitRows.apply(datasource0, {"count": {">": 50}}, "split11", "split12", transformation_ctx ="split1")## #type: SplitRows
selFromCol1 = SelectFromCollection.apply(dfc = split1, key = "split11", transformation_ctx = "selFromCol1")
selFromCol2 = SelectFromCollection.apply(dfc = split1, key = "split12", transformation_ctx = "selFromCol2")
spigot1 = Spigot.apply(frame = selFromCol1, path = "s3://asgqatestautomation3/SourceFiles/spigot1Op", options = {"topk":5},transformation_ctx ="spigot1")
job.commit()
I'm trying to convert a DataFrame to a DynamicFrame using the toDF and fromDF functions (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-crawler-pyspark-extensions-dynamic-frame.html#aws-glue-api-crawler-pyspark-extensions-dynamic-frame-fromDF), as per the code snippet below:
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
## #params: [JOB_NAME]
args = getResolvedOptions(sys.argv, ['JOB_NAME'])
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
## #type: DataSource
## #args: [database = "test-3", table_name = "test", transformation_ctx = "datasource0"]
## #return: datasource0
## #inputs: []
datasource0 = glueContext.create_dynamic_frame.from_catalog(database = "test-3", table_name = "test", transformation_ctx = "datasource0")
foo = datasource0.toDF()
bar = DynamicFrame.fromDF(foo, glueContext, "bar")
However, I'm getting an error on the line:
bar = DynamicFrame.fromDF(foo, glueContext, "bar")
The error says:
NameError: name 'DynamicFrame' is not defined
I've tried the usual googling to no avail, I can't see what I've done wrong from other examples. Does anyone know why I'm getting this error and how to resolve it?
from awsglue.dynamicframe import DynamicFrame
Import DynamicFrame
You need to import the DynamicFrame class from awsglue.dynamicframe module:
from awsglue.dynamicframe import DynamicFrame
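With the import in place, the original snippet works as expected (same database and table names as in the question):
from awsglue.dynamicframe import DynamicFrame

datasource0 = glueContext.create_dynamic_frame.from_catalog(
    database="test-3", table_name="test", transformation_ctx="datasource0"
)
foo = datasource0.toDF()                            # DynamicFrame -> Spark DataFrame
bar = DynamicFrame.fromDF(foo, glueContext, "bar")  # Spark DataFrame -> DynamicFrame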
There are a lot of things missing from the examples provided in the AWS Glue ETL documentation.
However, you can refer to the following GitHub repository which contains lots of examples for performing basic tasks with Glue ETL:
AWS Glue samples
I want to access the Hive metastore by running a Spark job on AWS Glue. Doing so requires me to point the job at the Hive instance's IP and connect to it. This works from my local machine, but not from AWS Glue.
I have tried to access Hive using the following piece of code:
spark_session = (
    glueContext.spark_session
    .builder
    .appName('example-pyspark-read-and-write-from-hive')
    .config(
        "hive.metastore.uris",
        "thrift://172.16.12.34:9083",
        conf=SparkConf()
    )
    .enableHiveSupport()
    .getOrCreate()
)
I have also looked at various documentation, but none of it could tell me how to connect to an EC2 instance on a specific port.
The code is:
import sys
from awsglue.context import GlueContext
from awsglue.job import Job
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark import SparkConf, SparkContext
from pyspark.conf import SparkConf
from pyspark.context import SparkConf, SparkContext
from pyspark.sql import (DataFrameReader, DataFrameWriter, HiveContext,
SparkSession)
"""
SparkSession ss = SparkSession
.builder()
.appName(" Hive example")
.config("hive.metastore.uris", "thrift://localhost:9083")
.enableHiveSupport()
.getOrCreate();
"""
args = getResolvedOptions(sys.argv, ['JOB_NAME'])
sc = SparkContext()
glueContext = GlueContext(sc)
spark_session = (
    glueContext.spark_session
    .builder
    .appName('example-pyspark-read-and-write-from-hive')
    .config(
        "hive.metastore.uris",
        "thrift://172.16.12.34:9083",
        conf=SparkConf()
    )
    .enableHiveSupport()
    .getOrCreate()
)
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
data = [('First', 1), ('Second', 2), ('Third', 3), ('Fourth', 4), ('Fifth', 5)]
df = spark_session.createDataFrame(data)
df.write.saveAsTable('example_2')
job.commit()
I expect the table to be written to Hive, but instead I get the following error from Glue:
An error occurred while calling o239.saveAsTable. No Route to Host from ip-172-31-14-64/172.31.14.64 to ip-172-31-15-11.ap-south-1.compute.internal:8020 failed on socket timeout exception: java.net.NoRouteToHostException: No route to host;
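Separately from the networking error above (No Route to Host on port 8020 is the HDFS NameNode port, which suggests a VPC routing problem between the Glue job and the cluster rather than a metastore setting), one thing worth checking in the snippet itself: in PySpark's builder, I believe passing conf=SparkConf() to config() replaces the key/value pair given in the same call, so hive.metastore.uris may never actually be applied. A minimal sketch that sets it explicitly:
from pyspark.sql import SparkSession

spark_session = (
    SparkSession.builder
    .appName("example-pyspark-read-and-write-from-hive")
    # Set the metastore URI as a plain key/value pair; do not pass conf=SparkConf()
    # in the same call, since the conf argument takes precedence in the builder.
    .config("hive.metastore.uris", "thrift://172.16.12.34:9083")
    .enableHiveSupport()
    .getOrCreate()
)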