SSIS multi-conditional if statement

I am a new SSIS user and am wondering how I can accomplish the following.
I need an if statement that reads: if Column 1 = 1 then 1, else if Column 2 = 2 then 2, else if Column 3 = 3 then 3, else 0. I just need to consolidate three columns into one column in SSIS.
My current SSIS formula only gives me Column 1 and NULL. I want the new column to show 1, 2, 3, 0, etc.
[Column 1] == "1" ? "1" : [column 2] == "2" ? "2" : [Column 3] == "3" ? "3" : "0"

What you have is correct for the supplied data and conditions.
Here is a minimal reproduction.
Source query
SELECT
*
FROM
(
VALUES
('1', '2', '3', '4', 'ROW_0')
, ('X', '2', '3', '4', 'ROW_1')
, ('X', 'X', '3', '4', 'ROW_2')
, ('X', 'X', 'X', '4', 'ROW_3')
, ('X', 'X', 'X', 'X', 'ROW_4')
)D(Column1, Column2, Column3, Column4, ColNotes);
Derived column formula (same as yours, except I eliminated the space in the column names to avoid typing square brackets)
Column1 == "1" ? "1" : Column2 == "2" ? "2" : Column3 == "3" ? "3" : "0"
The only way you'd get a NULL in the derived column is if you had a NULL in the inbound data.
Adding this row to the source query will result in a NULL as the generated value.
, (NULL, 'X', 'X', 'X', 'ROW_5')
If you need to guard against this condition, then lead off with the null check in your expression:
(ISNULL(Column1) || ISNULL(Column2) || ISNULL(Column3)) ? "0" : Column1 == "1" ? "1" : Column2 == "2" ? "2" : Column3 == "3" ? "3" : "0"

AWS Redshift Stored Procedure Abortions

I've got two stored procedures which run every 4 hours. I'm experiencing odd behaviour where both of these procedures get aborted exactly as many times as they run successfully (see table below). I'm using pg_catalog.svl_stored_proc_call to get the procedure run status.
When I look at pg_catalog.stl_load_errors, I can't see any errors for those runs.
What's the best way to investigate this behaviour?
Code for datawarehouse.p_add_missing_tbls()
DECLARE
row RECORD;
row2 RECORD;
BEGIN
FOR row IN select * from
(
select distinct
concat(concat(lower(regexp_replace(d.service,'-','_')), '_'),
lower(regexp_replace(regexp_replace(d.entity_name,'::',''),'(.)([A-Z]+)','$1_$2'))) as "tbl_name",
concat(concat(concat(concat(lower(regexp_replace(d.service,'-','_')),'_'),lower(regexp_replace(regexp_replace(d.entity_name,'::',''),'(.)([A-Z]+)','$1_$2'))),'_'), d."key") as "key" ,
d.value_type,
case d.value_type
when '0' then 'varchar(32768)'
when '1' then 'numeric(20,10)'
when '2' then 'int(2)'
when '3' then 'timestamp'
when '4' then 'varchar(32768)'
end as "data_type"
from
datawarehouse.definitions d
where
d."key" not in ('id')
)
loop
--List of keys
-- select into row2 '\'' || listagg(distinct "key",'\',\'') || '\'' as keys from datawarehouse.definitions where concat(concat(lower(service),'_'),lower(entity_name)) = row.tbl_name;
-- select into row2 '\'' || listagg(distinct "key",'\',\'') || '\'' as keys from datawarehouse.definitions where concat(concat(lower(definitions.service),'_'),lower(definitions.entity_name)) = row.tbl_name;
select into row2 '\'' || listagg(distinct (concat(concat(concat(concat(lower(regexp_replace(service,'-','_')),'_'),lower(regexp_replace(regexp_replace(entity_name,'::',''),'(.)([A-Z]+)','$1_$2'))),'_'), "key")),'\',\'') || '\'' as keys from datawarehouse.definitions where concat(concat(lower(regexp_replace(definitions.service,'-','_')),'_'),lower(regexp_replace(regexp_replace(definitions.entity_name,'::',''),'(.)([A-Z]+)','$1_$2'))) = row.tbl_name and definitions."key" not in ('id');
--Delete staging tbl
execute 'drop table if exists staging.staging_'||row.tbl_name||';';
--Create staging tbl
EXECUTE 'create table staging.staging_'||row.tbl_name||' AS
select *
from (select
attribute_values.entity_id as '||row.tbl_name||'_id,
-- definitions."key",
concat(concat(concat(concat(lower(regexp_replace(definitions.service,''-'',''_'')),''_''),lower(regexp_replace(regexp_replace(definitions.entity_name,''::'',''''),''(.)([A-Z]+)'',''$1_$2''))),''_''), definitions."key")::varchar as "key",
concat(concat(lower(regexp_replace(definitions.service,''-'',''_'')),''_''),lower(regexp_replace(regexp_replace(definitions.entity_name,''::'',''''),''(.)([A-Z]+)'',''$1_$2''))) as "tbl_name",
attribute_values.updated_at as "updated_at",
attribute_values.destroyed_upstream as "deleted_upstream",
case definitions.value_type
when ''0'' then attribute_values.string_value::varchar
when ''1'' then attribute_values.number_value::varchar
when ''2'' then attribute_values.boolean_value::varchar
when ''3'' then attribute_values.datetime_value::varchar
when ''4'' then attribute_values.array_value::varchar
end as "final_value"
from
datawarehouse.attribute_values
left join
datawarehouse.definitions
on
definitions.id = attribute_values.definition_id
where
attribute_values.updated_at>= coalesce(((select max(updated_at) from datawarehouse.'|| row.tbl_name || ' )), (select min(av.updated_at) from datawarehouse.attribute_values av ))
and
tbl_name='''||row.tbl_name||'''
and
definitions."key" not in (''id'')
order by
entity_id desc)
PIVOT (max(final_value) for "key" in ( ' || row2.keys || ' )
);';
-- Drop staging.staging_col_info_v
-- execute 'drop view staging.staging_col_info_v;';
-- Create view
execute ' create or replace view staging.staging_col_info_v AS
with staging_tbl_info as (
select
d.table_schema ,
d.table_name ,
d.column_name as "column_name"
from
pg_catalog.svv_columns d
where
d.table_schema = ''staging''
and
d.table_name like ''staging_%''
),
tbl_info as (
select
col_name,
data_type
from
datawarehouse.tbl_col_v
)
select
*
from
staging_tbl_info s
inner join
tbl_info h
on
h.col_name = s.column_name;';
END LOOP;
RETURN;
END;

How to read a sequence of numbers and stop the data entry by entering -1?

Develop the code to read a sequence of numbers entered by the user and display it. The user is told to enter the number -1 to indicate the end of data entry.
input_list = raw_input('Enter sequence of numbers and enter -1 to indicate the end of data entry: ')
list = input_list.split()
list = [int(a) for a in input_list:if(a == -1): break]
print 'list: ',list
I am expecting to get:
ex1) Enter sequence of numbers and enter -1 to indicate the end of data entry: 1 3 5 -1 6 2
list: [1, 3, 5]
ex2) Enter sequence of numbers and enter -1 to indicate the end of data entry: 1 3 5 2 4 -1 2 6 2
list: [1, 3, 5, 2, 4]
However, of course, the code does not work.
You can use the split function twice. The first split lets you stop at the first "-1"; the second separates the numbers:
input_list = raw_input('Enter sequence of numbers and enter -1 to indicate the end of data entry: ')
numbers = input_list.split('-1')[0].split(' ')[:-1]  # [:-1] drops the empty string left by the trailing space before '-1'
print(numbers)
# With : 1 3 5 -1 6 2
Out [1] : ['1', '3', '5']
# With 1 3 5 2 4 -1 2 6 2
Out [2] : ['1', '3', '5', '2', '4']
Side note: be careful, list is a built-in name in Python, so it's best not to use it as a variable name.
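If you also want integers rather than strings (as in your expected output), here is a minimal alternative sketch, assuming the same Python 2 setup as your code (raw_input and the print statement): split on whitespace and stop at the sentinel value.
input_list = raw_input('Enter sequence of numbers and enter -1 to indicate the end of data entry: ')
numbers = []
for token in input_list.split():
    if token == '-1':          # sentinel value: stop collecting here
        break
    numbers.append(int(token))
print 'list: ', numbers
# 1 3 5 -1 6 2        ->  list:  [1, 3, 5]
# 1 3 5 2 4 -1 2 6 2  ->  list:  [1, 3, 5, 2, 4]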

summing up the values in a dictionary

I created a dictionary with the following syntax
frequency_m= dict(zip(unique, counts))
which results in:
{0: 3512488, 1: 2606, 2: 3553, 3: 3929, ..........}
I want to classify the key/value pairs as binary, '1' or '0'. Below is what I tried:
for k, v in frequency_m.iteritems():
    if k == 0:
        print '0', v
    else:
        print '1', sum(v)
Obviously that generates TypeError: 'numpy.int64' object is not iterable. I am sure I need to iterate over the values and sum them up for the keys other than 0, but I am not getting it. Any thoughts?
0 3512488
1 2606
1 3553
1 3929
My goal here is to output the table as:
0 3512488
1 10088
I tried the following as well: np.sum((value for key, value in frequency_m.iteritems() if key != '0')), but it sums up all the values and does not give what I want.
Just change your comprehension to check for 0 instead of '0':
np.sum((value for key, value in frequency_m.iteritems() if key != 0))
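For instance, a minimal sketch with the sample counts from your dictionary (assuming frequency_m holds plain integer counts, as produced by dict(zip(unique, counts))):
import numpy as np

frequency_m = {0: 3512488, 1: 2606, 2: 3553, 3: 3929}
print '0', frequency_m[0]
print '1', np.sum([value for key, value in frequency_m.iteritems() if key != 0])
# 0 3512488
# 1 10088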

extra commas when using read_csv causing too many "s in data frame

I'm trying to read in a large file (~8 GB) using pandas read_csv. In one of the columns in the data, there is sometimes a list which includes commas but is enclosed by curly brackets, e.g.
"label1","label2","label3","label4","label5"
"{A1}","2","","False","{ "apple" : false, "pear" : false, "banana" : null}
Therefore, when these particular lines were read in, I was getting the error "Error tokenizing data. C error: Expected 37 fields in line 35, saw 42". I found a solution which said to add
sep=",(?![^{]*})" to the read_csv arguments, which split the data correctly. However, the data now includes the quotation marks around every entry (this didn't happen before I added the sep argument).
The data looks something like this now:
"label1" "label2" "label3" "label4" "label5"
"{A1}" "2" "" "False" "{ "apple" : false, "pear" : false, "banana" : null}"
meaning I can't use, for example, .describe() on the numerical data, because the values are still strings.
Does anyone know of a way of reading it in without the quotation marks but still splitting the data where it is?
Very new to Python so apologies if there is an obvious solution.
serialdev found a solution for removing the "s, but the resulting columns are objects and not what I would expect/want, e.g. the integer values aren't seen as integers.
The data needs to be split at "," explicitly (including the "s); is there a way of stating that in the read_csv arguments?
Thanks!
To read in the data structure you specified, where the last element has an unknown length:
"{A1}","2","","False","{ "apple" : false, "pear" : false, "banana" : null}"
"{A1}","2","","False","{ "apple" : false, "pear" : false, "banana" : null, "orange": "true"}"
Change the separator to a regular expression using a negative lookahead assertion. This will split on a ',' only when it is not immediately followed by a space.
df = pd.read_csv('my_file.csv', sep='[,](?!\s)', engine='python', thousands='"')
print df
0 1 2 3 4
0 "{A1}" 2 NaN "False" "{ "apple" : false, "pear" : false, "banana" :...
1 "{A1}" 2 NaN "False" "{ "apple" : false, "pear" : false, "banana" :...
Specifying the thousands separator as the quote is a bit of a hacky way to parse fields containing a quoted integer into the correct datatype. You can achieve the same result using converters, which can also remove the quotes from the strings should you need it, and cast "True" or "False" to a boolean.
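For example, a rough sketch of that converters approach (the file name is the same placeholder as above, and the column positions 1 and 3 are assumptions based on your sample row, where they hold a quoted integer and a quoted "False"; adjust them to your real layout):
import pandas as pd

df = pd.read_csv('my_file.csv',
                 sep='[,](?!\s)',
                 engine='python',
                 converters={1: lambda s: int(s.strip('"')),         # '"2"'     -> 2
                             3: lambda s: s.strip('"') == 'True'})   # '"False"' -> False
print df.dtypes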
If you need to remove the "s from a column, use the vectorized function str.strip:
import pandas as pd
mydata = [{'"first_name"': '"Bill"', '"age"': '"7"'},
{'"first_name"': '"Bob"', '"age"': '"8"'},
{'"first_name"': '"Ben"', '"age"': '"9"'}]
df = pd.DataFrame(mydata)
print (df)
"age" "first_name"
0 "7" "Bill"
1 "8" "Bob"
2 "9" "Ben"
df['"first_name"'] = df['"first_name"'].str.strip('"')
print (df)
"age" "first_name"
0 "7" Bill
1 "8" Bob
2 "9" Ben
If you need to apply str.strip() to all columns, use:
df = pd.concat([df[col].str.strip('"') for col in df], axis=1)
df.columns = df.columns.str.strip('"')
print (df)
age first_name
0 7 Bill
1 8 Bob
2 9 Ben
Timings:
mydata = [{'"first_name"': '"Bill"', '"age"': '"7"'},
{'"first_name"': '"Bob"', '"age"': '"8"'},
{'"first_name"': '"Ben"', '"age"': '"9"'}]
df = pd.DataFrame(mydata)
df = pd.concat([df]*3, axis=1)
df.columns = ['"first_name1"','"age1"','"first_name2"','"age2"','"first_name3"','"age3"']
#create sample [300000 rows x 6 columns]
df = pd.concat([df]*100000).reset_index(drop=True)
df1,df2 = df.copy(),df.copy()
def a(df):
    df.columns = df.columns.str.strip('"')
    df['age1'] = df['age1'].str.strip('"')
    df['first_name1'] = df['first_name1'].str.strip('"')
    df['age2'] = df['age2'].str.strip('"')
    df['first_name2'] = df['first_name2'].str.strip('"')
    df['age3'] = df['age3'].str.strip('"')
    df['first_name3'] = df['first_name3'].str.strip('"')
    return df

def b(df):
    #apply str function to all columns in dataframe
    df = pd.concat([df[col].str.strip('"') for col in df], axis=1)
    df.columns = df.columns.str.strip('"')
    return df

def c(df):
    #apply str function to all columns in dataframe
    df = df.applymap(lambda x: x.lstrip('\"').rstrip('\"'))
    df.columns = df.columns.str.strip('"')
    return df
print (a(df))
print (b(df1))
print (c(df2))
In [135]: %timeit (a(df))
1 loop, best of 3: 635 ms per loop
In [136]: %timeit (b(df1))
1 loop, best of 3: 728 ms per loop
In [137]: %timeit (c(df2))
1 loop, best of 3: 1.21 s per loop
Would this work since you have all the data that you need:
.map(lambda x: x.lstrip('\"').rstrip('\"'))
So simply clean up all the occurrences of " afterwards.
EDIT with example:
mydata = [{'"first_name"' : '"bill', 'age': '"75"'},
{'"first_name"' : '"bob', 'age': '"7"'},
{'"first_name"' : '"ben', 'age': '"77"'}]
IN: df = pd.DataFrame(mydata)
OUT:
"first_name" age
0 "bill "75"
1 "bob "7"
2 "ben "77"
IN: df['"first_name"'] = df['"first_name"'].map(lambda x: x.lstrip('\"').rstrip('\"'))
OUT:
0 bill
1 bob
2 ben
Name: "first_name", dtype: object
Use this sequence after selecting the column; it is not ideal, but it will get the job done:
.map(lambda x: x.lstrip('\"').rstrip('\"'))
You can change the dtypes afterwards using this pattern:
df['col'].apply(lambda x: pd.to_numeric(x, errors='ignore'))
or simply:
df[['col2','col3']] = df[['col2','col3']].apply(pd.to_numeric)
It depends on your file. Did you check whether your data contains commas inside cells? If a cell contains something like Banana: Fruit, Tropical, Eatable, etc., you will get this kind of error. One basic solution is to remove all commas from the file. Or, if you can read it in, you can remove the special characters:
>>>df
Banana
0 Hello, Salut, Salom
1 Bonjour
>>>df['Banana'] = df['Banana'].str.replace(',','')
>>>df
Banana
0 Hello Salut Salom
1 Bonjour

Oracle REGEXP_SUBSTR not honoring null values

I have an issue with REGEXP_SUBSTR not honoring null values.
select
REGEXP_SUBSTR ('2035197553,2,S,14-JUN-14,,P', '[^,]+', 1, 1) AS phn_nbr,
REGEXP_SUBSTR ('2035197553,2,S,14-JUN-14,,P', '[^,]+', 1, 2) AS phn_pos,
REGEXP_SUBSTR ('2035197553,2,S,14-JUN-14,,P', '[^,]+', 1, 3) AS phn_typ,
REGEXP_SUBSTR ('2035197553,2,S,14-JUN-14,,P', '[^,]+', 1, 4) AS phn_strt_dt,
REGEXP_SUBSTR ('2035197553,2,S,14-JUN-14,,P', '[^,]+', 1, 5) AS phn_end_dt,
REGEXP_SUBSTR ('2035197553,2,S,14-JUN-14,,P', '[^,]+', 1, 6) AS pub_indctr
from dual;
If phn_end_dt is null and pub_indctr is not null, the value of pub_indctr is shifted into phn_end_dt.
Result:

PHN_NBR    PHN_POS PHN_TYP PHN_STRT_DT PHN_END_DT PUB_INDCTR
---------- ------- ------- ----------- ---------- ----------
2035197553 2       S       14-JUN-14   P

While it should be:

PHN_NBR    PHN_POS PHN_TYP PHN_STRT_DT PHN_END_DT PUB_INDCTR
---------- ------- ------- ----------- ---------- ----------
2035197553 2       S       14-JUN-14              P
Any suggestions ?
I'm afraid your accepted answer does not handle the case where you need the value after the null position (try to get the 6th field):
SQL> select REGEXP_SUBSTR ('2035197553,2,S,14-JUN-14,,P', '[^,]*', 1, 6) phn_end_dt
  2  from dual;

P
-
You need to do this instead I believe (works on 11g):
SQL> select REGEXP_SUBSTR ('2035197553,2,S,14-JUN-14,,P', '([^,]*)(,|$)', 1, 6, NULL, 1) phn_end_dt
  2  from dual;

P
-
P
I just discovered this after posting my own question: REGEX to select nth value from a list, allowing for nulls
You can solve your task like this:
with t(val) as (
select '2035197553,2,S,14-JUN-14,,P' from dual
), t1 (val) as (
select ',' || val || ',' from t
)
select substr(val, REGEXP_INSTR(val, ',', 1, 1) + 1, REGEXP_INSTR(val, ',', 1, 1 + 1) - REGEXP_INSTR(val, ',', 1, 1) - 1) a
, substr(val, REGEXP_INSTR(val, ',', 1, 2) + 1, REGEXP_INSTR(val, ',', 1, 2 + 1) - REGEXP_INSTR(val, ',', 1, 2) - 1) b
, substr(val, REGEXP_INSTR(val, ',', 1, 3) + 1, REGEXP_INSTR(val, ',', 1, 3 + 1) - REGEXP_INSTR(val, ',', 1, 3) - 1) c
, substr(val, REGEXP_INSTR(val, ',', 1, 4) + 1, REGEXP_INSTR(val, ',', 1, 4 + 1) - REGEXP_INSTR(val, ',', 1, 4) - 1) d
, substr(val, REGEXP_INSTR(val, ',', 1, 5) + 1, REGEXP_INSTR(val, ',', 1, 5 + 1) - REGEXP_INSTR(val, ',', 1, 5) - 1) e
, substr(val, REGEXP_INSTR(val, ',', 1, 6) + 1, REGEXP_INSTR(val, ',', 1, 6 + 1) - REGEXP_INSTR(val, ',', 1, 6) - 1) f
from t1
A          B C D         E F
-------------------------------------
2035197553 2 S 14-JUN-14 - P
The typical csv parsing approach is as follows:
WITH t(csv_str) AS
( SELECT '2035197553,2,S,14-JUN-14,,P' FROM dual
  UNION ALL
  SELECT '2035197553,2,S,14-JUN-14,,' FROM dual
)
SELECT LTRIM(REGEXP_SUBSTR (',' || csv_str, ',[^,]*', 1, 1), ',') AS phn_nbr,
       LTRIM(REGEXP_SUBSTR (',' || csv_str, ',[^,]*', 1, 2), ',') AS phn_pos,
       LTRIM(REGEXP_SUBSTR (',' || csv_str, ',[^,]*', 1, 3), ',') AS phn_typ,
       LTRIM(REGEXP_SUBSTR (',' || csv_str, ',[^,]*', 1, 4), ',') AS phn_strt_dt,
       LTRIM(REGEXP_SUBSTR (',' || csv_str, ',[^,]*', 1, 5), ',') AS phn_end_dt,
       LTRIM(REGEXP_SUBSTR (',' || csv_str, ',[^,]*', 1, 6), ',') AS pub_indctr
FROM   t
I like to place a comma at the start of the CSV string and then count the commas with the non-comma pattern.
Explanation of the search pattern
The search pattern looks for the nth substring (the nth occurrence corresponds with the nth element in the CSV), which works as follows:
- The pattern begins with a ','.
- Next, it is followed by the pattern '[^,]'. This is a non-matching list expression; the caret, ^, conveys that the characters in the list should not be matched.
- This non-matching list has the quantifier *, which means it can occur zero or more times.
Once a match is found, I use the LTRIM function to remove the leading comma.
What is nice about this approach is that the occurrence of the search pattern always corresponds with the occurrence of the comma.
You need to change this line:
REGEXP_SUBSTR ('2035197553,2,S,14-JUN-14,,P', '[^,]+', 1, 5) AS phn_end_dt,
to this (note the * in place of +):
REGEXP_SUBSTR ('2035197553,2,S,14-JUN-14,,P', '[^,]*', 1, 5) AS phn_end_dt,
[^,]+ matches any character other than , one or more times, while [^,]* matches it zero or more times. So [^,]+ assumes there is at least one non-comma character in the field, but between two consecutive commas there isn't one; changing + to * lets the regex engine match an empty field.
Thanks for pointing me in the right direction; I have used this to solve the issue.
SELECT REGEXP_SUBSTR (val, '([^,]*),|$', 1, 1, NULL, 1) phn_nbr ,
       REGEXP_SUBSTR (val, '([^,]*),|$', 1, 2, NULL, 1) phn_pos ,
       REGEXP_SUBSTR (val, '([^,]*),|$', 1, 3, NULL, 1) phn_typ ,
       REGEXP_SUBSTR (val, '([^,]*),|$', 1, 4, NULL, 1) phn_strt_dt ,
       REGEXP_SUBSTR (val, '([^,]*),|$', 1, 5, NULL, 1) phn_end_dt ,
       REGEXP_SUBSTR (val || ',', '([^,]*),|$', 1, 6, NULL, 1) pub_indctr
FROM
  (SELECT '2035197553,2,S,14-JUN-14,,P' val FROM dual
  );
Oracle version: Oracle Database 11g Enterprise Edition Release 11.2.0.4.0 - 64bit Production
I have a generic use case where I don't know the exact columns coming in the string, so I used the code below, which solved the problem.
function substring_specific_occurence(p_string    varchar2
                                     ,p_delimiter varchar2
                                     ,p_occurence number) return varchar2
is
  l_output    varchar2(2000);
  g_miss_char varchar2(20)    := 'fdkjkjhkuhhf7';
  l_string    varchar2(10000) := replace(p_string, p_delimiter||p_delimiter, ''||p_delimiter||g_miss_char||p_delimiter||'');
begin
  -- keep padding empty fields with the placeholder until no empty field remains
  while (l_string like '%'||p_delimiter||p_delimiter||'%')
  loop
    l_string := replace(l_string, p_delimiter||p_delimiter, ''||p_delimiter||g_miss_char||p_delimiter||'');
  end loop;

  select regexp_substr(l_string, '[^'||p_delimiter||']+', 1, p_occurence)
    into l_output
    from dual;

  -- strip the placeholder so empty fields come back as NULL
  return replace(l_output, g_miss_char);
end substring_specific_occurence;