I am creating/updating a set in dynamodb with multiple threads. This is the code I am using
# sends a request to add or update
def update(key, value_to_be_added_to_set):
    """Send an UpdateItem request that unions one value into 'mySet'.

    If the item (or the attribute) does not exist yet it is created;
    otherwise the value is added to the existing set.  Uses the
    module-level `table` resource.
    """
    # ADD on a set attribute performs a server-side set union; the Python
    # set literal maps to a DynamoDB set type through the resource API.
    return table.update_item(
        Key={'key_name': key},
        UpdateExpression='ADD mySet :val',
        ExpressionAttributeValues={':val': {value_to_be_added_to_set}},
        ReturnConsumedCapacity='INDEXES',
    )
I couldn't find anything in AWS documentation as to whether this operation guarantees thread safety. That is if I add [value=1] and add [value=2] to a set, the result should always be value={1,2}.
So I wrote this script to test it.
import threading
from random import randrange
import boto3

# Shared fixtures for the concurrency test: one DynamoDB table and a fresh
# random key per run.  50 threads x 10 values each => the set should end up
# holding exactly the values 1..500.
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('my-test')
key = f'test{randrange(1, 1000000000)}'
max_concurrency = 50
multiplier = 10
# sends a request to add or update
# sends a request to add or update
def update(key, value_to_be_added_to_set):
    """Union `value_to_be_added_to_set` into the 'mySet' attribute of `key`.

    Creates the key and the set if they do not exist yet; otherwise the
    value is simply added to the existing set.
    """
    # The Python set literal becomes a DynamoDB set; ADD is a server-side
    # set union on the item owned by the partition's leader node.
    return table.update_item(
        Key={'key_name': key},
        UpdateExpression='ADD mySet :val',
        ExpressionAttributeValues={':val': {value_to_be_added_to_set}},
        ReturnConsumedCapacity='INDEXES',
    )
# Worker body: each thread receives a unique number from 0 to 49.
def create_or_update_values(num):
    """Add `multiplier` consecutive values to the shared set at `key`.

    Thread 0 adds values 1..10, thread 1 adds 11..20, ...,
    thread 49 adds 491..500.
    """
    start = num * multiplier
    for i in range(start + 1, start + multiplier + 1):
        update(key, i)
    # BUG FIX: the original printed the loop variable `i` (the last value
    # added) instead of the thread's own number.
    print(f"Thread {num} has finished")
threads = []
# Spin up one thread per worker number 0..max_concurrency-1.
for i in range(max_concurrency):
    t = threading.Thread(target=create_or_update_values, args=(i,))
    threads.append(t)
    t.start()
for t in threads:
    t.join()
print("All threads have finished.")

# Fetch mySet, convert it to a list and sort it (DynamoDB returns the
# numbers as Decimal, which sort numerically).
values = list(table.get_item(Key={'key_name': key})['Item']['mySet'])
values.sort()

# Verify the list contains exactly the values 1..max_concurrency*multiplier.
# BUG FIX: the original asserted `int(l[i]) == i` with i starting at 1,
# which is off by one (the smallest value sits at index 0) and raises
# IndexError at the final i.
for i in range(1, max_concurrency * multiplier + 1):
    assert int(values[i - 1]) == i
This test passes every time it is run.
Assuming I update 50 identical keys at the same time, can I safely assume the thread safety here?
DynamoDB Architecture
DynamoDB stores items in partitions, which are located on servers known as storage nodes.
DynamoDB follows a leader/follower architecture in which all writes (and strongly consistent reads) are served by the leader node for that partition group.
Serialized Writes
All writes are serialized by the leader node, meaning all updates will happen in order as they are received by the node. The changes are then replicated to the follower nodes in an eventually consistent manner.
Serializable isolation ensures that the results of multiple concurrent operations are the same as if no operation begins until the previous one has finished. src
For more information on DynamoDB Architecture, please refer to this YouTube Video
Yes, individual item updates are serialized.
Import xls file (more than 5000 lines) into my sqlite database takes so long.
def importeradsl(request):
    """Import an ADSL-fault spreadsheet (uploaded via POST) into the database.

    For every data row: parse the cell values, save the client, category,
    agents, subcontractor, centre, unit, frames, and finally the fault
    record (DerangementAdsl).

    NOTE(review): as posted, the GET branch has no body, which is a syntax
    error — presumably it renders the upload form.
    """
    if "GET" == request.method:
    else:
        excel_file = request.FILES["excel_file"]
        # you may put validations here to check extension or file size
        wb = openpyxl.load_workbook(excel_file)
        # getting a particular sheet by name out of many sheets
        worksheet = wb["Sheet 1"]
        # iterating over the rows and getting value from each cell in row
        for row in worksheet.iter_rows(min_row=2):
            row_data = list()
            for cell in row:
                row_data.append(str(cell.value))
            # Field extraction for the DerangementCuivre models
            # Client
            nd = row_data[0]
            nom_client = row_data[3]
            nd_contact = row_data[4]
            # Category
            code_categorie = row_data[6]
            acces_reseau = row_data[8]
            etat = row_data[9]
            origine = row_data[10]
            code_sig = row_data[11]
            agent_sig = row_data[13]
            # Dates re-serialized as 'YYYY-MM-DD HH:MM:SS' strings
            # (presumably `parse` is dateutil's parser — TODO confirm).
            date_sig = dt.datetime.strftime(parse(row_data[14]), '%Y-%m-%d %H:%M:%S')
            date_essai = dt.datetime.strftime(parse(row_data[15]), '%Y-%m-%d %H:%M:%S')
            agent_essai = row_data[18]
            # If column 19 is already in the canonical format keep it as a
            # datetime, otherwise fall back to re-formatting via parse().
            try:
                date_ori = dt.datetime.strptime(row_data[19], '%Y-%m-%d %H:%M:%S')
            except ValueError as e:
                print ("Vous", e)
            else:
                date_ori = dt.datetime.strftime(parse(row_data[19]), '%Y-%m-%d %H:%M:%S')
            agent_ori = row_data[20]
            code_ui = row_data[21]
            equipe = row_data[22]
            sous_traitant = row_data[23]
            date_pla = dt.datetime.strftime(parse(row_data[24]), '%Y-%m-%d %H:%M:%S')
            date_rel = dt.datetime.strftime(parse(row_data[25]), '%Y-%m-%d %H:%M:%S')
            # Columns 25 and 15 parsed again as datetimes for the delay
            # computation below.
            date_releve = dt.datetime.strptime(row_data[25], '%Y-%m-%d %H:%M:%S')
            date_essais = dt.datetime.strptime(row_data[15], '%Y-%m-%d %H:%M:%S')
            # Localize to Dakar local time, then convert to UTC.
            pst = pytz.timezone('Africa/Dakar')
            date_releve = pst.localize(date_releve)
            utc = pytz.UTC
            date_releve = date_releve.astimezone(utc)
            date_essais = pst.localize(date_essais)
            date_essais = date_essais.astimezone(utc)
            code_rel = row_data[26]
            localisation = row_data[27]
            cause = row_data[28]
            commentaire = row_data[29]
            agent_releve = row_data[30]
            centre_racc = row_data[32]
            rep = row_data[33]
            srp = row_data[34]
            # Delay between test and resolution; `dali` is whole days.
            delai = (date_releve - date_essais).total_seconds()
            dali = divmod(delai, 86400)[0]
            semaine = date_releve.isocalendar()[1]
            mois = date_releve.month
            annee = date_releve.year
            # More than 7 days late => "PEX PLUS" state.
            if dali > 7:
                etats = "PEX PLUS"
            else:
                etats = "PEX"
            # Save a client
            Client(nd=nd, nom=nom_client, mobile=nd_contact).save()
            # Save the category
            # Code for the category name - to fill in later
            Categorie(code_categorie=code_categorie, nom="Public").save()
            # Save the signalling agent
            AgentSig(matricule=agent_sig, nom="Awa").save()
            # Save the test agent
            AgentEssai(matricule=agent_essai).save()
            # Save the orientation agent
            AgentOri(matricule=agent_ori).save()
            # Save the survey agent
            AgentRel(matricule=agent_releve).save()
            # Save the subcontractor
            SousTraitant(nom=sous_traitant).save()
            # Save the centre
            Centre(code=centre_racc).save()
            # Save the intervention unit
            UniteIntervention(code_ui=code_ui,
                              sous_traitant=SousTraitant.objects.get(nom=sous_traitant)).save()
            # Save the distribution frame
            Repartiteur(code=rep, crac=Centre.objects.get(code=centre_racc)).save()
            # Save the team
            Equipe(nom=equipe, unite=UniteIntervention.objects.get(code_ui=code_ui)).save()
            # Save the sub-frame
            SousRepartiteur(code=srp, rep=Repartiteur.objects.get(code=rep)).save()
            # Save the fault record itself, re-fetching every related
            # instance that was just saved (one extra SELECT per relation).
            # NOTE(review): `categorie` below is an *unsaved* instance, unlike
            # every other relation which is fetched with .get() — verify intent.
            DerangementAdsl(acces_reseau=acces_reseau,
                            nd_client=Client.objects.get(nd=nd),
                            categorie=Categorie(code_categorie=code_categorie),
                            etat=etat,
                            origine=origine,
                            code_sig=code_sig,
                            agent_sig=AgentSig.objects.get(matricule=agent_sig),
                            date_sig=date_sig,
                            date_essai=date_essai,
                            agent_essai=AgentEssai.objects.get(matricule=agent_essai),
                            date_ori=date_ori,
                            agent_ori=AgentOri.objects.get(matricule=agent_ori),
                            sous_traitant=SousTraitant.objects.get(nom=sous_traitant),
                            unite_int = UniteIntervention.objects.get(code_ui=code_ui),
                            date_pla=date_pla,
                            date_rel=date_rel,
                            code_rel=code_rel,
                            code_local=localisation,
                            cause=cause,
                            comment_cause=commentaire,
                            agent_rel=AgentRel.objects.get(matricule=agent_releve),
                            centre=Centre.objects.get(code=centre_racc),
                            rep=Repartiteur.objects.get(code=rep),
                            srep=SousRepartiteur.objects.get(code=srp),
                            delai=dali,
                            etat_vr=etats,
                            semaine=semaine,
                            mois=mois,
                            annee=annee).save()
There are a few things that are incorrect.
I propose to you the following approach:
Make your code more readable
Remove useless queries
Avoid related records duplication
Cache out your related instances.
Use bulk_create
Looking at your code, with a rough estimation, you will get over 30 SQL queries per imported spreadsheet row — that's a bit much...
1. Make your code more readable.
Your parsing logic can be DRYed, a lot.
First, identify what you do with your data.
From my point of view, 2 main functions:
Do nothing:
def no_transformation(value):
    """Identity transform: coerce the cell value to str and return it."""
    # BUG FIX: the original def line was missing its colon.
    return str(value)
Parse dates
def strptime(value):
    """Parse a date-ish cell value and normalize it to 'YYYY-MM-DD HH:MM:SS'.

    NOTE(review): `parse` here is the dateutil parser used by the original
    question code — TODO confirm.  BUG FIX: the original snippet called
    dt.datetime.strptime() on the datetime object returned by parse(),
    which raises TypeError; strftime() (format, not parse) is what the
    question's working code actually used.
    """
    return dt.datetime.strftime(parse(str(value)), '%Y-%m-%d %H:%M:%S')
Now, you can declare your parser configuration:
# Declarative parser configuration: which column feeds which variable, and
# how the raw cell value is transformed.
# BUG FIX: the last three entries of the original had stray quote placement,
# e.g. (15',date_essais',strptime), which is a syntax error.
PARSER_CONFIG = (
    # (column_index, variable_name, transformation_function)
    (0, 'nd', no_transformation),
    (10, 'origine', no_transformation),
    (11, 'code_sig', no_transformation),
    (13, 'agent_sig', no_transformation),
    (14, 'date_sig', strptime),
    (15, 'date_essai', strptime),
    (18, 'agent_essai', no_transformation),
    (19, 'date_ori', strptime),
    (20, 'agent_ori', no_transformation),
    (21, 'code_ui', no_transformation),
    (22, 'equipe', no_transformation),
    (23, 'sous_traitant', no_transformation),
    (24, 'date_pla', strptime),
    (25, 'date_rel', strptime),
    (26, 'code_rel', no_transformation),
    (27, 'localisation', no_transformation),
    (28, 'cause', no_transformation),
    (29, 'commentaire', no_transformation),
    (3, 'nom_client', no_transformation),
    (30, 'agent_releve', no_transformation),
    (32, 'centre_racc', no_transformation),
    (33, 'rep', no_transformation),
    (34, 'srp', no_transformation),
    (4, 'nd_contact', no_transformation),
    (6, 'code_categorie', no_transformation),
    (8, 'acces_reseau', no_transformation),
    (9, 'etat', no_transformation),
    # Columns 15/19/25 are intentionally mapped twice: once as the string
    # field above, once under a second name for the delay computation.
    (15, 'date_essais', strptime),
    (19, 'date_ori', strptime),
    (25, 'date_releve', strptime),
)
Now, you know how to parse your data, and how to name it.
Let just put that stuff into a dict.
def parse(row):
    """Transform a row into a dict.

    Args:
        row (tuple): Your row's data.

    Returns:
        dict: Your parsed data, named into a dict keyed by PARSER_CONFIG.
    """
    # BUG FIX: the original comprehension called `tranfsorm` (typo), which
    # would raise NameError on the first row.
    return {
        key: transform(row[index]) for index, key, transform in PARSER_CONFIG
    }
From here, your parser is way more readable, you know exactly what you're doing with your data.
Wrapping this up all together, you should get:
PARSER_CONFIG = (
    # (column_index, variable_name, transformation_function)
    # ...
)


def no_transformation(value):
    """Identity transform: coerce the cell value to str."""
    # BUG FIX: the original def line was missing its colon.
    return str(value)


def strptime(value):
    """Date-parsing placeholder (see the real implementation in section 1)."""
    # BUG FIX: the original def line was missing its colon.
    return str(value)


def parse(row):
    """Transform a row into a dict.

    Args:
        row (tuple): Your row's data.

    Returns:
        dict: Your parsed data, named into a dict keyed by PARSER_CONFIG.
    """
    # BUG FIX: the original called `tranfsorm` (typo).
    return {
        key: transform(row[index]) for index, key, transform in PARSER_CONFIG
    }
# Illustrative only: `rows` stands for your worksheet row iterable.
for row in rows:
    item = parse(row)  # < Your data, without related instances yet....
Still have some work to create your related instances, but we'll get there eventually.
2. Removing useless queries.
You do :
# ...First, you create a record
Client(nd=nd, nom=nom_client, mobile=nd_contact).save()
# ...then you fetch the very same record again when saving DerangementAdsl,
# costing an extra SELECT per row:
nd_client=Client.objects.get(nd=nd)
While a more pythonic way of doing this would be:
# ...You create your instance, save it, and keep the reference around.
# BUG FIX: the original wrote `client = Client(...).save()`, but Django's
# Model.save() returns None, so `client` would have been None.
client = Client(nd=item.get('nd'),
                nom=item.get('nom_client'),
                mobile=item.get('nd_contact'))
client.save()
# ...
nd_client = client
You just earned one SQL query/row! Doing the same logic for each models, and you'll earn around 20 queries per row!
# One objects.create() per related model, keeping each instance so the
# DerangementAdsl row can reference it directly — no follow-up .get() query.
# BUG FIX: the original lines ended with stray commas, which turned every
# assignment into a 1-tuple, and the Equipe line had mismatched parentheses.
categorie = Categorie.objects.create(code_categorie=item.get('code_categorie'), nom="Public")
# Signalling agent
agent_sig = AgentSig.objects.create(matricule=item.get('agent_sig'), nom="Awa")
# Test agent
agent_essai = AgentEssai.objects.create(matricule=item.get('agent_essai'))
# Orientation agent
agent_ori = AgentOri.objects.create(matricule=item.get('agent_ori'))
# Survey agent
agent_rel = AgentRel.objects.create(matricule=item.get('agent_releve'))
# Subcontractor
sous_traitant = SousTraitant.objects.create(nom=item.get('sous_traitant'))
# Centre
centre = Centre.objects.create(code=item.get('centre_racc'))
# Intervention unit
unite_int = UniteIntervention.objects.create(code_ui=item.get('code_ui'), sous_traitant=sous_traitant)  # < one extra query saved with sous_traitant
# Distribution frame
rep = Repartiteur.objects.create(code=item.get('rep'), crac=centre)  # < one extra query saved with centre
# Team
equipe = Equipe.objects.create(nom=item.get('equipe'), unite=unite_int)  # < one extra query saved with unite_int
# Sub-frame
srep = SousRepartiteur.objects.create(code=item.get('srp'), rep=rep)  # < one extra query saved with rep
3. Avoid related records duplication
Now there is one big issue:
Considering you have multiple rows for each client,
you'll eventually find yourself with many duplicates, and you do not want that.
Instead of using create, you should go with get_or_create.
Please note it returns a tuple: (instance, created)
So.... your code should go like:
# get_or_create returns an (instance, created) tuple.
# BUG FIX: the trailing commas in the original wrapped each result in an
# extra 1-tuple, breaking the two-name unpacking, and the Equipe line had
# its closing parenthesis in the wrong place (syntax error).
categorie, categorie_created = Categorie.objects.get_or_create(code_categorie=item.get('code_categorie'), nom="Public")
agent_sig, agent_sig_created = AgentSig.objects.get_or_create(matricule=item.get('agent_sig'), nom="Awa")
agent_essai, agent_essai_created = AgentEssai.objects.get_or_create(matricule=item.get('agent_essai'))
agent_ori, agent_ori_created = AgentOri.objects.get_or_create(matricule=item.get('agent_ori'))
agent_rel, agent_rel_created = AgentRel.objects.get_or_create(matricule=item.get('agent_releve'))
sous_traitant, sous_traitant_created = SousTraitant.objects.get_or_create(nom=item.get('sous_traitant'))
centre, centre_created = Centre.objects.get_or_create(code=item.get('centre_racc'))
unite_int, unite_int_created = UniteIntervention.objects.get_or_create(code_ui=item.get('code_ui'), sous_traitant=sous_traitant)
rep, rep_created = Repartiteur.objects.get_or_create(code=item.get('rep'), crac=centre)
equipe, equipe_created = Equipe.objects.get_or_create(nom=item.get('equipe'), unite=unite_int)
srep, srep_created = SousRepartiteur.objects.get_or_create(code=item.get('srp'), rep=rep)
Tadaaaaam, you'll create records that are "only" necessary for your related objects.
4. Caching out your related objects.
As in previous topic, I consider you have multiple rows for each related instance,
and for each row, you will still get to fetch that from your DB.
It's OK I guess if you're using SQLite in memory, it won't be as slow as with other DBs, still, it'll be a bottleneck.
You could use an approach like:
MODEL_CACHE = {}


def get_related_instance(model, **kwargs):
    """Fetch (or create) the `model` row matching `kwargs`, memoized.

    BUG FIXES vs. the original snippet:
      * `(model, kwargs)` is unhashable (kwargs is a dict) — use a
        frozenset of the items instead;
      * `return instance MODEL_CACHE[key]` was a syntax error;
      * the cache dict was misspelled `MODEL_CACH` on the write path.
    """
    key = (model, frozenset(kwargs.items()))
    if key in MODEL_CACHE:
        return MODEL_CACHE[key]
    instance, created = model.objects.get_or_create(**kwargs)
    MODEL_CACHE[key] = instance
    return instance
# Instead of the previous get_or_create lines, you now end up with:
# BUG FIX: the stray trailing commas (which produced 1-tuples) and the
# Equipe line's misplaced parenthesis are removed, as in earlier snippets.
categorie = get_related_instance(Categorie, code_categorie=item.get('code_categorie'), nom="Public")
agent_sig = get_related_instance(AgentSig, matricule=item.get('agent_sig'), nom="Awa")
agent_essai = get_related_instance(AgentEssai, matricule=item.get('agent_essai'))
agent_ori = get_related_instance(AgentOri, matricule=item.get('agent_ori'))
agent_rel = get_related_instance(AgentRel, matricule=item.get('agent_releve'))
sous_traitant = get_related_instance(SousTraitant, nom=item.get('sous_traitant'))
centre = get_related_instance(Centre, code=item.get('centre_racc'))
unite_int = get_related_instance(UniteIntervention, code_ui=item.get('code_ui'), sous_traitant=sous_traitant)
rep = get_related_instance(Repartiteur, code=item.get('rep'), crac=centre)
equipe = get_related_instance(Equipe, nom=item.get('equipe'), unite=unite_int)
srep = get_related_instance(SousRepartiteur, code=item.get('srp'), rep=rep)
I cannot tell how much you'll gain thanks to that, it really depends on the data set you're trying to import,
but from experience, it's quite drastic!
5. Use bulk_create
You are doing
# Anti-pattern being discussed: one INSERT round trip per row.
for row in rows:
    DerangementAdsl(...your data...).save() #<That's one DB call
That's one SQL query per row, while you could do:
# Batch the inserts: build all instances in memory, insert them in one call.
ITEMS = []
for row in rows:
    #...Your parsing we saw previously produces `item`...
    ITEMS.append(DerangementAdsl(**item))
DerangementAdsl.objects.bulk_create(ITEMS) #<That's one DB call
Putting it all together!
PARSER_CONFIG = (
    # (column_index, variable_name, transformation_function)
    # ...
)


def no_transformation(value):
    """Identity transform: coerce the cell value to str."""
    # BUG FIX: the original def line was missing its colon.
    return str(value)


def strptime(value):
    """Date-parsing placeholder (see section 1 for the real implementation)."""
    # BUG FIX: the original def line was missing its colon.
    return str(value)


MODEL_CACHE = {}


def get_related_instance(model, **kwargs):
    """get_or_create `model` for `kwargs`, memoized per (model, kwargs).

    BUG FIXES vs. the original: `mode`/`MODEL_CACH` typos, the unhashable
    dict used in the cache key, and the invalid `return instance ...` line.
    """
    key = (model, frozenset(kwargs.items()))
    if key in MODEL_CACHE:
        return MODEL_CACHE[key]
    instance, created = model.objects.get_or_create(**kwargs)
    MODEL_CACHE[key] = instance
    return instance


def parse(row):
    """Transform a row into a dict of DerangementAdsl constructor kwargs.

    Args:
        row (tuple): Your row's data.

    Returns:
        dict: Your parsed data plus resolved related instances.
    """
    # BUG FIX: the original comprehension called `tranfsorm` (typo).
    item = {
        key: transform(row[index]) for index, key, transform in PARSER_CONFIG
    }
    # Resolve the related instances one by one so the later ones
    # (unite_int, rep, equipe, srep) can reference the earlier ones.
    # BUG FIX: the original dict literal was missing commas between entries
    # and referenced names (sous_traitant, centre, unite_int, rep) that did
    # not exist in that scope.
    sous_traitant = get_related_instance(SousTraitant, nom=item.get('sous_traitant'))
    centre = get_related_instance(Centre, code=item.get('centre_racc'))
    unite_int = get_related_instance(UniteIntervention, code_ui=item.get('code_ui'), sous_traitant=sous_traitant)
    rep = get_related_instance(Repartiteur, code=item.get('rep'), crac=centre)
    item.update({
        'categorie': get_related_instance(Categorie, code_categorie=item.get('code_categorie'), nom="Public"),
        'agent_sig': get_related_instance(AgentSig, matricule=item.get('agent_sig'), nom="Awa"),
        'agent_essai': get_related_instance(AgentEssai, matricule=item.get('agent_essai')),
        'agent_ori': get_related_instance(AgentOri, matricule=item.get('agent_ori')),
        'agent_rel': get_related_instance(AgentRel, matricule=item.get('agent_releve')),
        'sous_traitant': sous_traitant,
        'centre': centre,
        'unite_int': unite_int,
        'rep': rep,
        'equipe': get_related_instance(Equipe, nom=item.get('equipe'), unite=unite_int),
        'srep': get_related_instance(SousRepartiteur, code=item.get('srp'), rep=rep),
    })
    return item


def importeradsl(request):
    # I skip your request-method/validation conditions for readability;
    # `worksheet` is the openpyxl sheet loaded from the uploaded file.
    ITEMS = []
    for row in worksheet.iter_rows(min_row=2):
        ITEMS.append(DerangementAdsl(**parse(row)))
    DerangementAdsl.objects.bulk_create(ITEMS)
Conclusion
Following those recommendations, you should end up with an optimized script that will run way faster than the original one, and be way more readable and Pythonic.
Roughly, depending on your dataset, 5k lines should run somewhere between 10 seconds up to few minutes.
If each row's related instance (client,category...) is unique, I'd use a more sophisticated approach looping multiple times over your dataset to create related models using bulk_create and cache them out like:
# Bulk-create all clients in a single query, then cache the returned
# instances for reuse when building the fault records.
CLIENTS = []
for row in rows:
    CLIENTS.append(Client(**client_parser(row)))
clients=Client.objects.bulk_create(CLIENTS) # You create *all* your clients with only one DB call!
Then, you cache all created clients. You do the same for all your related models and eventually you'll load your data making a dozen of DB calls, but it really depends on your business logic here: It should be engineered to handle duplicated records too.
I'm trying to store temporal embedding in pytable. There are 12 tables and each table has more than 130,000 rows, where each table has two columns (word varchar, embedding float(numpy.arry(300,))). What I want is to calculate cosine similarity for a given against all the word in a given table and repeat this for all 12 tables. Presently I'm doing it sequentially by iterating on each table but it takes around 15 minutes to calculate for all 12 tables.
So my question is: is it possible to read all the tables concurrently? I tried multithreading, but I get an error
Segmentation fault: 11
Below is my code snippet
def synchronized_open_file():
    # Serialize open calls through the module-level `lock` — presumably
    # because PyTables file handles must not be opened/closed concurrently
    # (TODO confirm against the PyTables threading docs).
    with lock:
        return tb.open_file(FILENAME, mode="r", title="embedding DB")


def synchronized_close_file(self, *args, **kwargs):
    # `self` is the open file handle returned by synchronized_open_file();
    # closing is serialized through the same lock.
    with lock:
        return self.close(*args, **kwargs)
# Workers push (table_name, result) tuples — or an Exception — onto this
# queue; the main thread drains it below.
# NOTE(review): `threads` is never initialized in this snippet (needs
# `threads = []` first), and `self.top_n_similar` implies this code lives
# inside a class method — confirm against the full source.
outqueue = queue.Queue()
for table in list_table :
    thread = threading.Thread(target=self.top_n_similar, args=(table,))
    thread.start()
    threads.append(thread)
try:
    # Collect exactly one result per started thread; re-raise any worker
    # exception in the main thread.
    for _ in range(len(threads)):
        result = outqueue.get()
        if isinstance(result, Exception):
            raise result
        else:
            top_n_neighbor_per_period[result[0]] = result[1]
finally:
    for thread in threads:
        thread.join()
def top_n_similar(table_name):
    """Worker: compute the top-n similarities for one table and push the
    result onto `outqueue`.

    BUG FIX: the original snippet had a `finally:` with no matching `try:`
    (a syntax error), and `do work()` was invalid pseudo-code.  The
    try/finally guarantees the HDF5 handle is closed even if the work or
    the queue put raises.
    """
    h5file = synchronized_open_file()
    try:
        result = do_work(h5file, table_name)  # placeholder for the real work
        outqueue.put(result)
    finally:
        synchronized_close_file(h5file)
Yes, you can access multiple pytable objects simultaneously. A simple example is provided below that creates 3 tables using a (300,2) numpy record array created with random data. It demonstrates you can access all 3 tables as table objects -OR- as numpy arrays (or both).
I have not done multi-threading with pytables, so can't help with that. I suggest you get your code to work in serial before adding multi-threading. Also, review the pytables docs. I know h5py has specific procedures to use mpi4py for multi-threading. Pytables may have similar requirements.
Code Sample
import tables as tb
import numpy as np

# Build a small demo file holding 3 tables derived from one (300,2) random
# array, so we can show multiple tables being accessed from one open file.
h5f = tb.open_file('SO_55445040.h5', mode='w')
mydtype = np.dtype([('word', float), ('varchar', float)])
arr = np.random.rand(300, 2)
recarr = np.core.records.array(arr, dtype=mydtype)
h5f.create_table('/', 'table1', obj=recarr)
recarr = np.core.records.array(2. * arr, dtype=mydtype)
h5f.create_table('/', 'table2', obj=recarr)
recarr = np.core.records.array(3. * arr, dtype=mydtype)
h5f.create_table('/', 'table3', obj=recarr)
h5f.close()

h5f = tb.open_file('SO_55445040.h5', mode='r')
# Return Table objects:
tb1 = h5f.get_node('/table1')
tb2 = h5f.get_node('/table2')
tb3 = h5f.get_node('/table3')
# Return numpy arrays:
# BUG FIX: the original wrote `.read` (no parentheses), which assigns the
# bound method itself rather than calling it — arr1/arr2/arr3 were never
# arrays.
arr1 = h5f.get_node('/table1').read()
arr2 = h5f.get_node('/table2').read()
arr3 = h5f.get_node('/table3').read()
h5f.close()
Initially tried using pd.read_sql().
Then I tried using sqlalchemy, query objects but none of these methods are
useful, as the SQL keeps executing for a long time and never finishes.
I tried using Hints.
I guess the problem is the following: Pandas creates a cursor object in the
background. With cx_Oracle we cannot influence the "arraysize" parameter which
will be used thereby, i.e. always the default value of 100 will be used which
is far too small.
CODE:
import pandas as pd
import Configuration.Settings as CS
import DataAccess.Databases as SDB
import sqlalchemy
import cx_Oracle

dfs = []
# Project-specific DB wrapper around the Oracle connection.
DBM = SDB.Database(CS.DB_PRM,PrintDebugMessages=False,ClientInfo="Loader")
# Large analytic query: materializes the relevant lot/zone ids (l) and the
# newest test-program revision (TPL), then joins out to the measurement data.
sql = '''
WITH
l AS
(
SELECT DISTINCT /*+ materialize */
hcz.hcz_lwzv_id AS lwzv_id
FROM
pm_mbt_materialbasictypes mbt
INNER JOIN pm_mpt_materialproducttypes mpt ON mpt.mpt_mbt_id = mbt.mbt_id
INNER JOIN pm_msl_materialsublots msl ON msl.msl_mpt_id = mpt.mpt_id
INNER JOIN pm_historycompattributes hca ON hca.hca_msl_id = msl.msl_id AND hca.hca_ignoreflag = 0
INNER JOIN pm_tpm_testdefprogrammodes tpm ON tpm.tpm_id = hca.hca_tpm_id
inner join pm_tin_testdefinsertions tin on tin.tin_id = tpm.tpm_tin_id
INNER JOIN pm_hcz_history_comp_zones hcz ON hcz.hcz_hcp_id = hca.hca_hcp_id
WHERE
mbt.mbt_name = :input1 and tin.tin_name = 'x1' and
hca.hca_testendday < '2018-5-31' and hca.hca_testendday > '2018-05-30'
),
TPL as
(
select /*+ materialize */
*
from
(
select
ut.ut_id,
ut.ut_basic_type,
ut.ut_insertion,
ut.ut_testprogram_name,
ut.ut_revision
from
pm_updated_testprogram ut
where
ut.ut_basic_type = :input1 and ut.ut_insertion = :input2
order by
ut.ut_revision desc
) where rownum = 1
)
SELECT /*+ FIRST_ROWS */
rcl.rcl_lotidentifier AS LOT,
lwzv.lwzv_wafer_id AS WAFER,
pzd.pzd_zone_name AS ZONE,
tte.tte_tpm_id||'~'||tte.tte_testnumber||'~'||tte.tte_testname AS Test_Identifier,
case when ppd.ppd_measurement_result > 1e15 then NULL else SFROUND(ppd.ppd_measurement_result,6) END AS Test_Results
FROM
TPL
left JOIN pm_pcm_details pcm on pcm.pcm_ut_id = TPL.ut_id
left JOIN pm_tin_testdefinsertions tin ON tin.tin_name = TPL.ut_insertion
left JOIN pm_tpr_testdefprograms tpr ON tpr.tpr_name = TPL.ut_testprogram_name and tpr.tpr_revision = TPL.ut_revision
left JOIN pm_tpm_testdefprogrammodes tpm ON tpm.tpm_tpr_id = tpr.tpr_id and tpm.tpm_tin_id = tin.tin_id
left JOIN pm_tte_testdeftests tte on tte.tte_tpm_id = tpm.tpm_id and tte.tte_testnumber = pcm.pcm_testnumber
cross join l
left JOIN pm_lwzv_info lwzv ON lwzv.lwzv_id = l.lwzv_id
left JOIN pm_rcl_resultschipidlots rcl ON rcl.rcl_id = lwzv.lwzv_rcl_id
left JOIN pm_pcm_zone_def pzd ON pzd.pzd_basic_type = TPL.ut_basic_type and pzd.pzd_pcm_x = lwzv.lwzv_pcm_x and pzd.pzd_pcm_y = lwzv.lwzv_pcm_y
left JOIN pm_pcm_par_data ppd ON ppd.ppd_lwzv_id = l.lwzv_id and ppd.ppd_tte_id = tte.tte_id
'''
#method1: using query objects.
# NOTE(review): `print Q` is Python 2 syntax — this snippet predates Py3.
Q = DBM.getQueryObject(sql)
Q.execute({"input1":'xxxx',"input2":'yyyy'})
while not Q.AtEndOfResultset:
    print Q
#method2: using sqlalchemy
# NOTE(review): as posted this string literal is split across physical lines
# (a syntax error), and the '#' before '(description=' looks like the usual
# Stack Overflow mangling of '@' in an Oracle EZCONNECT/DSN string — it
# should read user:password@(description=...). Verify before reuse.
connectstring = "oracle+cx_oracle://username:Password#(description=
(address_list=(address=(protocol=tcp)(host=tnsconnect string)
(port=pertnumber)))(connect_data=(sid=xxxx)))"
engine = sqlalchemy.create_engine(connectstring, arraysize=10000)
df_p = pd.read_sql(sql, params=
{"input1":'xxxx',"input2":'yyyy'}, con=engine)
#method3: using pd.read_sql()
df_p = pd.read_sql_query(SQL_PCM, params=
{"input1":'xxxx',"input2":'yyyy'},
coerce_float=True, con= DBM.Connection)
It would be great if some one could help me out in this. Thanks in advance.
And yet another possibility to adjust the array size without needing to create oraaccess.xml as suggested by Chris. This may not work with the rest of your code as is, but it should give you an idea of how to proceed if you wish to try this approach!
class Connection(cx_Oracle.Connection):
    """cx_Oracle connection whose cursors default to a large arraysize,
    reducing round trips when fetching many rows.

    BUG FIXES: the original DSN used '#' where '@' belongs (the usual
    copy/paste mangling), and create_engine() was called with only
    `creator=` — SQLAlchemy still requires the dialect URL even when a
    creator callable supplies the actual connections.
    """

    def __init__(self):
        super(Connection, self).__init__("user/pw@dsn")

    def cursor(self):
        # Every cursor handed out fetches 5000 rows per round trip.
        c = super(Connection, self).cursor()
        c.arraysize = 5000
        return c


engine = sqlalchemy.create_engine("oracle+cx_oracle://", creator=Connection)
pandas.read_sql(sql, engine)
Here's another alternative to experiment with.
Set a prefetch size by using the external configuration available to Oracle Call Interface programs like cx_Oracle. This overrides internal settings used by OCI programs. Create an oraaccess.xml file:
<?xml version="1.0"?>
<!-- Client-side OCI configuration: raises the row prefetch count to 1000
     for every connection made by this client.  Place next to tnsnames.ora
     or in the directory pointed to by TNS_ADMIN. -->
<oraaccess xmlns="http://xmlns.oracle.com/oci/oraaccess"
xmlns:oci="http://xmlns.oracle.com/oci/oraaccess"
schemaLocation="http://xmlns.oracle.com/oci/oraaccess
http://xmlns.oracle.com/oci/oraaccess.xsd">
<default_parameters>
<prefetch>
<rows>1000</rows>
</prefetch>
</default_parameters>
</oraaccess>
If you use tnsnames.ora or sqlnet.ora for cx_Oracle, then put the oraaccess.xml file in the same directory. Otherwise, create a new directory and set the environment variable TNS_ADMIN to that directory name.
cx_Oracle needs to be using Oracle Client 12c, or later, libraries.
Experiment with different sizes.
See OCI Client-Side Deployment Parameters Using oraaccess.xml.