Regular expression matching with spaCy - regex

I am creating spaCy regular expression matches for matching numbers and extracting them into a pandas data frame.
Question: pandas picks up the number but overwrites the value instead of appending. How can I solve this?
(original code credit: yarongon)
from __future__ import unicode_literals
import spacy
import re
import pandas as pd
from datetime import date
nlp = spacy.load('en_core_web_sm', disable=['parser', 'tagger', 'ner'])
doc = nlp("This is a sample number: 11. This is second sample number: 1145.")
NUM_PATTERN = re.compile(r"\d+")
for match in re.finditer(NUM_PATTERN, doc.text):
    start, end = match.span()
    Number = doc.char_span(start, end)
    print(Number)

pandas_attributes = [Number]
df = pd.DataFrame(pandas_attributes, columns=['Number'])
print(df)
Output:
11
1145
   Number
0    1145
Expected output:
   Number
0      11
1    1145
Edit 1:
I am trying to match multiple patterns on a single text.
from __future__ import unicode_literals
import spacy
import re
import pandas as pd
from datetime import date
nlp = spacy.load('en_core_web_sm', disable=['parser', 'tagger', 'ner'])
doc = nlp("This is a sample-number: 11. This is second sample number: 1145.")
NUM_PATTERN = re.compile(r"\d+")
HYPH_PATTERN = re.compile(r"\w+(?:-)\w+")
for match in re.finditer(NUM_PATTERN, doc.text):
    start, end = match.span()
    Number = doc.char_span(start, end)
    print(Number)

for match in re.finditer(HYPH_PATTERN, doc.text):
    start, end = match.span()
    Hyph_word = doc.char_span(start, end)
    print(Hyph_word)

pandas_attributes = [Number, Hyph_word]
df = pd.DataFrame(pandas_attributes, columns=['Number', 'Hyphenword'])
print(df)
Current output:
11
1145
sample-number
AssertionError: 2 columns passed, passed data had 3 columns
Expected output:
Number  Hyphen_word
11      sample-number
1145
Edit 2: output
   Number               Hyphenword
0  (11)                 (1145)
1  (sample, -, number)  None
Expected output:
   Number  Hyphenword
0  11      sample-number
1  1145    None

You need to append values to a list inside the loop:
L = []
for match in re.finditer(NUM_PATTERN, doc.text):
    start, end = match.span()
    L.append(doc.char_span(start, end))
and then use the DataFrame constructor:
df = pd.DataFrame(L, columns=['Number'])
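Putting it together, a minimal runnable sketch of the fix (assuming the same doc and NUM_PATTERN as in the question; storing span.text keeps plain strings in the frame, and the None check guards matches whose offsets don't align with token boundaries):
import re
import pandas as pd
import spacy

nlp = spacy.load('en_core_web_sm', disable=['parser', 'tagger', 'ner'])
doc = nlp("This is a sample number: 11. This is second sample number: 1145.")
NUM_PATTERN = re.compile(r"\d+")

L = []
for match in re.finditer(NUM_PATTERN, doc.text):
    span = doc.char_span(*match.span())
    if span is not None:  # char_span returns None if offsets don't match token boundaries
        L.append(span.text)

df = pd.DataFrame(L, columns=['Number'])
print(df)
#   Number
# 0     11
# 1   1145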
You can also append tuples with multiple values:
Sample:
L = []
for x in range(3):
    Number = x + 1
    Val = x + 4
    L.append((Number, Val))

print(L)
[(1, 4), (2, 5), (3, 6)]

df = pd.DataFrame(L, columns=['Number', 'Val'])
print(df)
   Number  Val
0       1    4
1       2    5
2       3    6
I believe you can use a double append:
PATTERNS = [NUM_PATTERN, HYPH_PATTERN]
pandas_attributes = []
for pat in PATTERNS:
    L = []
    for match in re.finditer(pat, doc.text):
        start, end = match.span()
        L.append(doc.char_span(start, end))
    pandas_attributes.append(L)

df = pd.DataFrame(pandas_attributes, index=['Number', 'Hyphenword']).T
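The (11) and (sample, -, number) cells in edit 2 are the reprs of spaCy Span objects. If you want plain strings as in the expected output, a sketch of one option (not part of the original answer) is to store span.text, falling back to match.group() when char_span returns None:
PATTERNS = [NUM_PATTERN, HYPH_PATTERN]
pandas_attributes = []
for pat in PATTERNS:
    L = []
    for match in re.finditer(pat, doc.text):
        span = doc.char_span(*match.span())
        # char_span returns a Span, or None when the character offsets
        # do not line up with token boundaries
        L.append(span.text if span is not None else match.group())
    pandas_attributes.append(L)

df = pd.DataFrame(pandas_attributes, index=['Number', 'Hyphenword']).T
print(df)
#   Number     Hyphenword
# 0     11  sample-number
# 1   1145           None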

Related

Using regex on spaCy PhraseMatcher [duplicate]


Python: create a pandas data frame from a list

I am using the following code to create a data frame from a list:
test_list = ['a','b','c','d']
df_test = pd.DataFrame.from_records(test_list, columns=['my_letters'])
df_test
The above code works fine. Then I tried the same approach for another list:
import pandas as pd
q_list = ['112354401', '116115526', '114909312', '122425491', '131957025', '111373473']
df1 = pd.DataFrame.from_records(q_list, columns=['q_data'])
df1
But this time it gave me the following error:
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-24-99e7b8e32a52> in <module>()
1 import pandas as pd
2 q_list = ['112354401', '116115526', '114909312', '122425491', '131957025', '111373473']
----> 3 df1 = pd.DataFrame.from_records(q_list, columns=['q_data'])
4 df1
/usr/local/lib/python3.4/dist-packages/pandas/core/frame.py in from_records(cls, data, index, exclude, columns, coerce_float, nrows)
1021 else:
1022 arrays, arr_columns = _to_arrays(data, columns,
-> 1023 coerce_float=coerce_float)
1024
1025 arr_columns = _ensure_index(arr_columns)
/usr/local/lib/python3.4/dist-packages/pandas/core/frame.py in _to_arrays(data, columns, coerce_float, dtype)
5550 data = lmap(tuple, data)
5551 return _list_to_arrays(data, columns, coerce_float=coerce_float,
-> 5552 dtype=dtype)
5553
5554
/usr/local/lib/python3.4/dist-packages/pandas/core/frame.py in _list_to_arrays(data, columns, coerce_float, dtype)
5607 content = list(lib.to_object_array(data).T)
5608 return _convert_object_array(content, columns, dtype=dtype,
-> 5609 coerce_float=coerce_float)
5610
5611
/usr/local/lib/python3.4/dist-packages/pandas/core/frame.py in _convert_object_array(content, columns, coerce_float, dtype)
5666 # caller's responsibility to check for this...
5667 raise AssertionError('%d columns passed, passed data had %s '
-> 5668 'columns' % (len(columns), len(content)))
5669
5670 # provide soft conversion of object dtypes
AssertionError: 1 columns passed, passed data had 9 columns
Why would the same approach work for one list but not another? Any idea what might be wrong here? Thanks a lot!
DataFrame.from_records treats a string as a list of characters, so it needs as many columns as the string has characters.
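A quick demonstration of that behavior, reusing one string from q_list (each 9-character string is expanded into 9 columns):
import pandas as pd

# from_records iterates each record; a string yields its characters,
# so one 9-character string becomes a row with 9 columns
pd.DataFrame.from_records(['112354401'])
#    0  1  2  3  4  5  6  7  8
# 0  1  1  2  3  5  4  4  0  1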
You could simply use the DataFrame constructor.
In [3]: pd.DataFrame(q_list, columns=['q_data'])
Out[3]:
q_data
0 112354401
1 116115526
2 114909312
3 122425491
4 131957025
5 111373473
In[20]: test_list = [['a','b','c'], ['AA','BB','CC']]
In[21]: pd.DataFrame(test_list, columns=['col_A', 'col_B', 'col_C'])
Out[21]:
col_A col_B col_C
0 a b c
1 AA BB CC
In[22]: pd.DataFrame(test_list, index=['col_low', 'col_up']).T
Out[22]:
col_low col_up
0 a AA
1 b BB
2 c CC
If you want to create a DataFrame from multiple lists, you can simply zip the lists. In Python 3 this returns a 'zip' object, so convert it back to a list.
mydf = pd.DataFrame(list(zip(lstA, lstB)), columns = ['My List A', 'My List B'])
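For example, with two small illustrative lists (lstA and lstB are just placeholder names):
import pandas as pd

lstA = ['x', 'y', 'z']
lstB = [1, 2, 3]
mydf = pd.DataFrame(list(zip(lstA, lstB)), columns=['My List A', 'My List B'])
print(mydf)
#   My List A  My List B
# 0         x          1
# 1         y          2
# 2         z          3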
You can also use the concat method, but note that it expects pandas objects rather than raw strings, so wrap the list in a Series first:
test_list = ['a','b','c','d']
pd.concat([pd.Series(test_list, name='my_letters')], axis=1)
You could also use NumPy:
import numpy as np
df1 = pd.DataFrame(np.array(q_list),columns=['q_data'])

Filling Value of a Pandas Data Frame From a Large DB Query (Python)

I am running a snippet of code that queries a database and then fills a pandas DataFrame with a value of 1 if a tuple is present in the query result. It does this by running the query and then iterating over the tuples to fill in the DataFrame. However, the query returns almost 8 million rows of data.
My question is whether anyone knows how to speed up a process like this. Here is the code:
user_age = pd.read_sql_query(sql_age, datastore, index_col=['userid']).age.astype(np.int, copy=False)
x = pd.DataFrame(0, index=user_age.index, columns=range(366), dtype=np.int8)

for r in pd.read_sql_query(sql_active, datastore, chunksize=50000):
    for userid, day in r.itertuples(index=False):
        x.at[userid, day] = 1
Thank you in advance!
You could save some time by replacing the Python loop
for userid, day in r.itertuples(index=False):
    x.at[userid, day] = 1
with a NumPy array assignment using "advanced integer indexing":
x[npidx[r['userid']], r['day']] = 1
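For intuition, advanced integer indexing lets NumPy set many scattered cells in a single vectorized assignment; a minimal standalone sketch:
import numpy as np

x = np.zeros((3, 4), dtype=np.int8)
rows = np.array([0, 2, 1])
cols = np.array([1, 3, 0])
x[rows, cols] = 1  # sets x[0, 1], x[2, 3] and x[1, 0] in one step
print(x)
# [[0 1 0 0]
#  [1 0 0 0]
#  [0 0 0 1]]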
On an 80000-row DataFrame, using_numpy (below) is about 6x faster:
In [7]: %timeit orig()
1 loop, best of 3: 984 ms per loop
In [8]: %timeit using_numpy()
10 loops, best of 3: 162 ms per loop
import numpy as np
import pandas as pd

def mock_read_sql_query():
    np.random.seed(2016)
    for arr in np.array_split(index, N//M):
        size = len(arr)
        df = pd.DataFrame({'userid': arr, 'day': np.random.randint(366, size=size)})
        df = df[['userid', 'day']]
        yield df

N, M = 8*10**4, 5*10**2
index = np.arange(N)
np.random.shuffle(index)
columns = range(366)

def using_numpy():
    # map each userid to its row position in x
    npidx = np.empty_like(index)
    npidx[index] = np.arange(len(index))
    x = np.zeros((len(index), len(columns)), dtype=np.int8)
    for r in mock_read_sql_query():
        x[npidx[r['userid']], r['day']] = 1
    x = pd.DataFrame(x, columns=columns, index=index)
    return x

def orig():
    x = pd.DataFrame(0, index=index, columns=columns, dtype=np.int8)
    for r in mock_read_sql_query():
        for userid, day in r.itertuples(index=False):
            x.at[userid, day] = 1
    return x

expected = orig()
result = using_numpy()
expected_index, expected_col = np.where(expected)
result_index, result_col = np.where(result)
assert np.equal(expected_index, result_index).all()
assert np.equal(expected_col, result_col).all()

pandas how to split twice a given field

I would like to count the top subjects in a column. Some fields contain commas or dots; I would like to create a new row for each part.
import pandas as pd
from pandas import DataFrame, Series
sbj = DataFrame(["Africa, Business", "Oceania",
                 "Business.Biology.Pharmacology.Therapeutics",
                 "French Litterature, Philosophy, Arts",
                 "Biology,Business", ""])
sbj
I would like to split into a new row any field that contains a ',' or a '.'
sbj_top = sbj[0].apply(lambda x: pd.value_counts(x.split(",")) if not pd.isnull(x) else pd.value_counts('---'.split(","))).sum(axis = 0)
sbj_top
I'm getting an AttributeError here when I try to re-split it on '.':
sbj_top = sbj_top.apply(lambda x: pd.value_counts(x.split(".")) if not pd.isnull(x) else pd.value_counts('---'.split(","))).sum(axis = 0)
sbj_top
My desired output
sbj_top.sort(ascending=False)
plt.title("Distribution of the top 10 subjects")
plt.ylabel("Frequency")
sbj_top.head(10).plot(kind='bar', color="#348ABD")
You can use Counter together with chain from itertools. Note that I first replace periods with commas before parsing.
from collections import Counter
import itertools
from string import whitespace
trimmed_list = [i.replace('.', ',').split(',') for i in sbj[0].tolist() if i != ""]
item_list = [item.strip(whitespace) for item in itertools.chain(*trimmed_list)]
item_count = Counter(item_list)
>>> item_count.most_common()
[('Business', 3),
('Biology', 2),
('Oceania', 1),
('Pharmacology', 1),
('Philosophy', 1),
('Africa', 1),
('French Litterature', 1),
('Therapeutics', 1),
('Arts', 1)]
If you need the output in the form of a DataFrame:
df = pd.DataFrame(item_list, columns=['subject'])
>>> df
subject
0 Africa
1 Business
2 Oceania
3 Business
4 Biology
5 Pharmacology
6 Therapeutics
7 French Litterature
8 Philosophy
9 Arts
10 Biology
11 Business
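If you prefer to stay in pandas, a sketch of an alternative (assuming pandas >= 0.25 for Series.explode): normalize the separators, split, explode to one subject per row, and count:
s = (sbj[0].str.replace('.', ',', regex=False)
           .str.split(',')
           .explode()
           .str.strip())
sbj_top = s[s != ''].value_counts()
print(sbj_top.head(10))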

Quick implementation of character n-grams for a word

I wrote the following code for computing character bigrams, and the output is right below. My question is, how do I get an output that excludes the last character (i.e. the trailing 't')? And is there a quicker, more efficient method for computing character n-grams?
>>> b = 'student'
>>> y = []
>>> for x in range(len(b)):
...     n = b[x:x+2]
...     y.append(n)
...
>>> y
['st', 'tu', 'ud', 'de', 'en', 'nt', 't']
Here is the result I would like to get: ['st', 'tu', 'ud', 'de', 'en', 'nt']
Thanks in advance for your suggestions.
To generate bigrams:
In [8]: b='student'
In [9]: [b[i:i+2] for i in range(len(b)-1)]
Out[9]: ['st', 'tu', 'ud', 'de', 'en', 'nt']
To generalize to a different n:
In [10]: n=4
In [11]: [b[i:i+n] for i in range(len(b)-n+1)]
Out[11]: ['stud', 'tude', 'uden', 'dent']
Try zip:
>>> def word2ngrams(text, n=3, exact=True):
... """ Convert text into character ngrams. """
... return ["".join(j) for j in zip(*[text[i:] for i in range(n)])]
...
>>> word2ngrams('foobarbarblacksheep')
['foo', 'oob', 'oba', 'bar', 'arb', 'rba', 'bar', 'arb', 'rbl', 'bla', 'lac', 'ack', 'cks', 'ksh', 'she', 'hee', 'eep']
but do note that it's slower:
import string, random, time

def zip_ngrams(text, n=3, exact=True):
    return ["".join(j) for j in zip(*[text[i:] for i in range(n)])]

def nozip_ngrams(text, n=3):
    return [text[i:i+n] for i in range(len(text)-n+1)]

# Generate 10000 random strings of length 100.
words = [''.join(random.choice(string.ascii_uppercase) for j in range(100)) for i in range(10000)]

start = time.time()
x = [zip_ngrams(w) for w in words]
print(time.time() - start)

start = time.time()
y = [nozip_ngrams(w) for w in words]
print(time.time() - start)

print(x == y)
[out]:
0.314492940903
0.197558879852
True
Although late, NLTK has a built-in function that implements ngrams:
# python 3
from nltk import ngrams
["".join(k1) for k1 in list(ngrams("hello world",n=3))]
['hel', 'ell', 'llo', 'lo ', 'o w', ' wo', 'wor', 'orl', 'rld']
This function gives you ngrams for n = 1 up to n:
def getNgrams(sentences, n):
    ngrams = []
    for sentence in sentences:
        _ngrams = []
        for _n in range(1, n+1):
            # len(sentence) - _n + 1 start positions cover the first and last ngrams
            for pos in range(len(sentence) - _n + 1):
                _ngrams.append(sentence[pos:pos+_n])
        ngrams.append(_ngrams)
    return ngrams
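For example, with the corrected ranges:
print(getNgrams(['abc'], 2))
# [['a', 'b', 'c', 'ab', 'bc']]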