Formula 1: List all items from Estimate tab
=QUERY(Estimate!A2:D50,"SELECT * where C is not null",0)
Formula 2: Locate task group matches within taskItemAssociations
=ARRAYFORMULA(IFERROR(VLOOKUP(A9:A&B9:B&C9:C&D9:D,
TRIM(IFERROR(SPLIT(TRIM(TRANSPOSE(QUERY(TRANSPOSE(
{INDEX(QUERY(IFERROR(SPLIT(SORT(UNIQUE(IF((LEN('task-itemAssociations'!A2:A&'task-itemAssociations'!B2:B&'task-itemAssociations'!C2:C&'task-itemAssociations'!D2:D))*(LEN('task-itemAssociations'!E2:E)),
'task-itemAssociations'!A2:A&'task-itemAssociations'!B2:B&'task-itemAssociations'!C2:C&'task-itemAssociations'!D2:D&"♦"&'task-itemAssociations'!E2:E, )), 1, 1), "♦")),
"select Col1,count(Col1) where Col1 is not null group by Col1 pivot Col2", 0),,1), IF(
ISNUMBER(QUERY(IFERROR(SPLIT(SORT(UNIQUE(IF((LEN('task-itemAssociations'!A2:A&'task-itemAssociations'!B2:B&'task-itemAssociations'!C2:C&'task-itemAssociations'!D2:D))*(LEN('task-itemAssociations'!E2:E)),
'task-itemAssociations'!A2:A&'task-itemAssociations'!B2:B&'task-itemAssociations'!C2:C&'task-itemAssociations'!D2:D&"♦"&'task-itemAssociations'!E2:E, )), 1, 1), "♦")),
"select count(Col1) where Col1 is not null group by Col1 pivot Col2", 0)),
QUERY(IFERROR(SPLIT(SORT(UNIQUE(IF((LEN('task-itemAssociations'!A2:A&'task-itemAssociations'!B2:B&'task-itemAssociations'!C2:C&'task-itemAssociations'!D2:D))*(LEN('task-itemAssociations'!E2:E)),
'task-itemAssociations'!A2:A&'task-itemAssociations'!B2:B&'task-itemAssociations'!C2:C&'task-itemAssociations'!D2:D&"♦♥"&'task-itemAssociations'!E2:E, )), 1, 1), "♦")),
"select count(Col1) where Col1 is not null group by Col1 pivot Col2 limit 0", 0), )})
,,999^99))), "♥"))), {2}, 0)))
Formula 3: List all matches from taskData tab
This result is really all I need. I'm just not sure how else to arrive here without all of the above.
=QUERY(taskData!C2:O,"SELECT * where C = '"&E9&"'",0)
Ideally, this would be a single ARRAYFORMULA in Tasks!A2 (currently occupied by notes)
Here is my sheet
Paste this in cell A2:
=FILTER(taskData!C2:O, REGEXMATCH(taskData!C2:C, TEXTJOIN("|", 1,
ARRAYFORMULA(IFERROR(VLOOKUP(
INDEX(QUERY(Estimate!A2:D50,"where C is not null",0),,1)&
INDEX(QUERY(Estimate!A2:D50,"where C is not null",0),,2)&
INDEX(QUERY(Estimate!A2:D50,"where C is not null",0),,3)&
INDEX(QUERY(Estimate!A2:D50,"where C is not null",0),,4),
TRIM(IFERROR(SPLIT(TRIM(TRANSPOSE(QUERY(TRANSPOSE(
{INDEX(QUERY(IFERROR(SPLIT(SORT(UNIQUE(IF((LEN(
'task-itemAssociations'!A2:A&'task-itemAssociations'!B2:B&'task-itemAssociations'!C2:C&
'task-itemAssociations'!D2:D))*(LEN('task-itemAssociations'!E2:E)),
'task-itemAssociations'!A2:A&'task-itemAssociations'!B2:B&'task-itemAssociations'!C2:C&
'task-itemAssociations'!D2:D&"♦"&'task-itemAssociations'!E2:E, )), 1, 1), "♦")),
"select Col1,count(Col1) where Col1 is not null group by Col1 pivot Col2", 0),,1), IF(
ISNUMBER(QUERY(IFERROR(SPLIT(SORT(UNIQUE(IF((LEN(
'task-itemAssociations'!A2:A&'task-itemAssociations'!B2:B&'task-itemAssociations'!C2:C&
'task-itemAssociations'!D2:D))*(LEN('task-itemAssociations'!E2:E)),
'task-itemAssociations'!A2:A&'task-itemAssociations'!B2:B&'task-itemAssociations'!C2:C&
'task-itemAssociations'!D2:D&"♦"&'task-itemAssociations'!E2:E, )), 1, 1), "♦")),
"select count(Col1) where Col1 is not null group by Col1 pivot Col2", 0)),
QUERY(IFERROR(SPLIT(SORT(UNIQUE(IF((LEN(
'task-itemAssociations'!A2:A&'task-itemAssociations'!B2:B&'task-itemAssociations'!C2:C&
'task-itemAssociations'!D2:D))*(LEN('task-itemAssociations'!E2:E)),
'task-itemAssociations'!A2:A&'task-itemAssociations'!B2:B&'task-itemAssociations'!C2:C&
'task-itemAssociations'!D2:D&"♦♥"&'task-itemAssociations'!E2:E, )), 1, 1), "♦")),
"select count(Col1) where Col1 is not null group by Col1 pivot Col2 limit 0", 0), )})
,,999^99))), "♥"))), {2}, 0))))))
I am using Amazon Athena engine version 1, which is based on Presto 0.172.
Consider the example data set:
id   date_column   col1
1    01/03/2021    NULL
1    02/03/2021    1
1    15/03/2021    2
1    16/03/2021    NULL
1    17/03/2021    NULL
1    30/03/2021    NULL
1    30/03/2021    1
1    31/03/2021    NULL
I would like to replace all NULLs in the table with the last non-NULL value, i.e. I want to get:
id   date_column   col1
1    01/03/2021    NULL
1    02/03/2021    1
1    15/03/2021    2
1    16/03/2021    2
1    17/03/2021    2
1    30/03/2021    1
1    30/03/2021    1
1    31/03/2021    1
I was thinking of using the lag() function with the IGNORE NULLS option, but unfortunately IGNORE NULLS is not supported by Athena engine version 1 (it is also not supported by Athena engine version 2, which is based on Presto 0.217).
How to achieve the desired format without using the IGNORE NULLS option?
Here is a template for generating the example table:
WITH source1 AS (
SELECT
*
FROM (
VALUES
(1, date('2021-03-01'), NULL),
(1, date('2021-03-02'), 1),
(1, date('2021-03-15'), 2),
(1, date('2021-03-16'), NULL),
(1, date('2021-03-17'), NULL),
(1, date('2021-03-30'), NULL),
(1, date('2021-03-30'), 1),
(1, date('2021-03-31'), NULL)
) AS t (id, date_col, col1)
)
SELECT
id
, date_col
, col1
-- This doesn't work, as IGNORE NULLS is not supported:
-- , CASE
--       WHEN col1 IS NOT NULL THEN col1
--       ELSE lag(col1) IGNORE NULLS OVER (PARTITION BY id ORDER BY date_col)
--   END AS col1_lag_nulls_ignored
FROM
source1
ORDER BY
date_col
After reviewing similar questions on SO (here and here), the solution below works for all column types (including strings and dates):
WITH source1 AS (
SELECT
*
FROM (
VALUES
(1, date('2021-03-01'), NULL),
(1, date('2021-03-02'), 1),
(1, date('2021-03-15'), 2),
(1, date('2021-03-16'), NULL),
(1, date('2021-03-17'), NULL),
(1, date('2021-03-30'), 1),
(1, date('2021-03-31'), NULL)
) AS t (id, date_col, col1)
)
, grouped AS (
SELECT
id
, date_col
, col1
-- If the row has a value in a column, then this row and all subsequent rows
-- with a NULL (before the next non-NULL value) will be in the same group.
, sum(CASE WHEN col1 IS NULL THEN 0 ELSE 1 END) OVER (
PARTITION BY id ORDER BY date_col) AS grp
FROM
source1
)
SELECT
id
, date_col
, col1
-- max is used instead of first_value, since in cases where there will
-- be multiple records with NULL on the same date, the first_value may
-- still return a NULL.
, max(col1) OVER (PARTITION BY id, grp ORDER BY date_col) AS col1_filled
, grp
FROM
grouped
ORDER BY
date_col
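If the helper grp column is not wanted in the output, the final SELECT can be wrapped once more. This is a minimal sketch that reuses the grouped CTE above; the subquery alias t is arbitrary:
SELECT
  id
, date_col
, col1_filled AS col1
FROM (
    SELECT
        id
        , date_col
        , max(col1) OVER (PARTITION BY id, grp ORDER BY date_col) AS col1_filled
    FROM grouped
) t
ORDER BY
  date_col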
I have a document 'A' with a formula in a 'SEARCH' tab that looks up a value from an 'ENTRIES' tab in Google Sheets, based on a cell reference entered by the user in the same 'SEARCH' tab as the formula.
If I duplicate the 'SEARCH' tab in another Google Sheets document/workbook 'B', how should the formula be altered so that it still references 'A'?
The original formula is based on:
Search a value from another tab/sheet in google sheets based on cell reference
WORKING EXAMPLE HERE (This would be document 'B' and it tries to reference a search in another document 'A'): https://docs.google.com/spreadsheets/d/1Ffl6IbehI0slLChyuW-MDezF2xwt0rX12JNIaCFvEI8/edit?usp=sharing (You can see in cell B8 the formula with IMPORTRANGE that I'm trying to implement)
And this would be document 'A'. Originally it is an example of how to search for values in another tab based on a different cell reference:
[https://docs.google.com/spreadsheets/d/1qLcJdCn4EdV7lPOAfZ_CMak1LBkve45FL5SXyqBV3L8/edit?usp=sharing]
I also checked:
Google Sheets VLOOKUP of multiple columns across multiple sheets
VLOOKUP to the left from another sheet in Google Sheets
Docs Editors help: IMPORTRANGE https://support.google.com/docs/answer/3093340
My original formula in book 'A' is:
=IFERROR(ARRAYFORMULA(
IF(B3<>"",SUBSTITUTE(TRANSPOSE(SPLIT(TEXTJOIN(CHAR(10)&"♦"&CHAR(10)&"♦", 1,
VLOOKUP(B3, {data!AN:AN, data!A:BN}, {41,38,19,11,55,56}, 0)), CHAR(10))), "♦", ),
IF(C3<>"", SUBSTITUTE(TRANSPOSE(SPLIT(TEXTJOIN(CHAR(10)&"♦"&CHAR(10)&"♦", 1,
VLOOKUP(C3, {data!AK:AK, data!A:BN}, {41,38,19,11,55,56}, 0)), CHAR(10))), "♦", ),
IF(E3<>"", SUBSTITUTE(TRANSPOSE(SPLIT(TEXTJOIN(CHAR(10)&"♦"&CHAR(10)&"♦", 1,
VLOOKUP(E3, {data!BJ:BJ, data!A:BN}, {41,38,19,11,55,56}, 0)), CHAR(10))), "♦", ),
IF(D3<>"", SUBSTITUTE(TRANSPOSE(SPLIT(TEXTJOIN(CHAR(10)&"♦"&CHAR(10)&"♦", 1,
VLOOKUP(D3, {data!R:R, data!A:BN}, {41,38,19,11,55,56}, 0)),
CHAR(10))), "♦", ), ))))), "no match found")
I was trying to use VLOOKUP(search_key, importrange, index, [is_sorted]) with an IMPORTRANGE as the range, like:
IFERROR(ARRAYFORMULA(
IF(B3<>"", SUBSTITUTE(TRANSPOSE(SPLIT(TEXTJOIN(CHAR(10)&"♦"&CHAR(10)&"♦", 1,
VLOOKUP(B3, {(IMPORTRANGE("https://docs.google.com/spreadsheets/d/1qLcJdCn4EdV7lPOAfZ_CMak1LBkve45FL5SXyqBV3L8/edit#gid=468700626","data!AN:AN"),
(IMPORTRANGE("https://docs.google.com/spreadsheets/d/1qLcJdCn4EdV7lPOAfZ_CMak1LBkve45FL5SXyqBV3L8/edit#gid=468700626","data!A:BN")},
{41,38,19,11,55,56}, 0)), CHAR(10))), "♦", ),
IF(C3<>"", SUBSTITUTE(TRANSPOSE(SPLIT(TEXTJOIN(CHAR(10)&"♦"&CHAR(10)&"♦", 1,
VLOOKUP(C3, {(IMPORTRANGE("https://docs.google.com/spreadsheets/d/1qLcJdCn4EdV7lPOAfZ_CMak1LBkve45FL5SXyqBV3L8/edit#gid=468700626","data!AK:AK"),
(IMPORTRANGE("https://docs.google.com/spreadsheets/d/1qLcJdCn4EdV7lPOAfZ_CMak1LBkve45FL5SXyqBV3L8/edit#gid=468700626","data!A:BN")},
{41,38,19,11,55,56}, 0)), CHAR(10))), "♦", ),
IF(E3<>"", SUBSTITUTE(TRANSPOSE(SPLIT(TEXTJOIN(CHAR(10)&"♦"&CHAR(10)&"♦", 1,
VLOOKUP(E3, {(IMPORTRANGE("https://docs.google.com/spreadsheets/d/1qLcJdCn4EdV7lPOAfZ_CMak1LBkve45FL5SXyqBV3L8/edit#gid=468700626","data!BJ:BJ"),
(IMPORTRANGE("https://docs.google.com/spreadsheets/d/1qLcJdCn4EdV7lPOAfZ_CMak1LBkve45FL5SXyqBV3L8/edit#gid=468700626","data!A:BN")},
{41,38,19,11,55,56}, 0)), CHAR(10))), "♦", ),
IF(D3<>"", SUBSTITUTE(TRANSPOSE(SPLIT(TEXTJOIN(CHAR(10)&"♦"&CHAR(10)&"♦", 1,
VLOOKUP(D3, {(IMPORTRANGE("https://docs.google.com/spreadsheets/d/1qLcJdCn4EdV7lPOAfZ_CMak1LBkve45FL5SXyqBV3L8/edit#gid=468700626","data!R:R"),
(IMPORTRANGE("https://docs.google.com/spreadsheets/d/1qLcJdCn4EdV7lPOAfZ_CMak1LBkve45FL5SXyqBV3L8/edit#gid=468700626","data!A:BN")},
{41,38,19,11,55,56}, 0)), CHAR(10))), "♦", ), ))))), "no match found")))))))))
It shows an ERROR, and even if I change it to ... VLOOKUP(B3{(IMPORTRANGE("URL","data!R:R"),(IMPORTRANGE("URL","data!A:BN")}, ... so that both references are wrapped in (), it still shows an ERROR.
The parse error is caused by extra parentheses that break the formula. The formula should be:
=IFERROR(ARRAYFORMULA(
IF(B3<>"", SUBSTITUTE(TRANSPOSE(SPLIT(TEXTJOIN(CHAR(10)&"♦"&CHAR(10)&"♦", 1,
VLOOKUP(B3, {IMPORTRANGE("1qLcJdCn4EdV7lPOAfZ_CMak1LBkve45FL5SXyqBV3L8","data!AN:AN"),
IMPORTRANGE("1qLcJdCn4EdV7lPOAfZ_CMak1LBkve45FL5SXyqBV3L8","data!A:BN")},
{41,38,19,11,55,56}, 0)), CHAR(10))), "♦", ),
IF(C3<>"", SUBSTITUTE(TRANSPOSE(SPLIT(TEXTJOIN(CHAR(10)&"♦"&CHAR(10)&"♦", 1,
VLOOKUP(C3, {IMPORTRANGE("1qLcJdCn4EdV7lPOAfZ_CMak1LBkve45FL5SXyqBV3L8","data!AK:AK"),
IMPORTRANGE("1qLcJdCn4EdV7lPOAfZ_CMak1LBkve45FL5SXyqBV3L8","data!A:BN")},
{41,38,19,11,55,56}, 0)), CHAR(10))), "♦", ),
IF(E3<>"", SUBSTITUTE(TRANSPOSE(SPLIT(TEXTJOIN(CHAR(10)&"♦"&CHAR(10)&"♦", 1,
VLOOKUP(E3, {IMPORTRANGE("1qLcJdCn4EdV7lPOAfZ_CMak1LBkve45FL5SXyqBV3L8","data!BJ:BJ"),
IMPORTRANGE("1qLcJdCn4EdV7lPOAfZ_CMak1LBkve45FL5SXyqBV3L8","data!A:BN")},
{41,38,19,11,55,56}, 0)), CHAR(10))), "♦", ),
IF(D3<>"", SUBSTITUTE(TRANSPOSE(SPLIT(TEXTJOIN(CHAR(10)&"♦"&CHAR(10)&"♦", 1,
VLOOKUP(D3, {IMPORTRANGE("1qLcJdCn4EdV7lPOAfZ_CMak1LBkve45FL5SXyqBV3L8","data!R:R"),
IMPORTRANGE("1qLcJdCn4EdV7lPOAfZ_CMak1LBkve45FL5SXyqBV3L8","data!A:BN")},
{41,38,19,11,55,56}, 0)), CHAR(10))), "♦", ), ))))), "no match found")
I need to combine two queries that are both inside arrayformulas so that I just have one query:
I've tried using Union
First Code:
= ARRAYFORMULA(QUERY({MID(Sheet1!B1:B, 8, 5), Sheet1!A1:AS},
"select count(Col13)
where Col13>=0
group by Col1
label count(Col13)'Winners #'"))
Second Code:
= ARRAYFORMULA(QUERY({MID(Sheet1!B1:B, 8, 5), Sheet1!A1:AS},
"select count(Col13)
where Col13<=0
group by Col1
label count(Col13)'Losers #'"))
=ARRAYFORMULA(QUERY(REGEXREPLACE(TO_TEXT(QUERY({
QUERY({MONTH(MID('grouping project'!A2:A, 8, 3)&1)&"♦"&
MID('grouping project'!A2:A, 8, 5), 'grouping project'!A2:AO},
"select Col1,count(Col3),'Winners #'
where Col1 is not null
and Col3 >= 0
group by Col1
label count(Col3)'','Winners #'''", 0);
QUERY({MONTH(MID('grouping project'!A2:A, 8, 3)&1)&"♦"&
MID('grouping project'!A2:A, 8, 5), 'grouping project'!A2:AO},
"select Col1,count(Col3),'Loosers #'
where Col3 <= 0
and Col1 is not null
group by Col1
label count(Col3)'','Loosers #'''", 0)},
"select Col1,sum(Col2)
group by Col1
pivot Col3
label Col1'Week ending'", 0)), "^.+♦", ),
"where Col1 is not null", 0))
I have two dates:
'2018-01-05' and '2019-01-05'
How can I create a calculated table that breaks those dates down by month?
It should look something like this:
There are probably many ways to do this, but here's one way that combines a few different concepts:
Table =
VAR Starting = DATE(2018, 1, 5)
VAR Ending = DATE(2019, 1, 5)
VAR MonthTable =
SUMMARIZE(
ADDCOLUMNS(
CALENDAR(Starting, Ending),
"StartDate", EOMONTH([Date], 0) + 1),
[StartDate],
"EndDate", EOMONTH([StartDate], 0) + 1)
RETURN UNION(
ROW("StartDate", Starting, "EndDate", EOMONTH(Starting, 0) + 1),
FILTER(MonthTable, [EndDate] < Ending && [StartDate] > Starting),
ROW("StartDate", EOMONTH(Ending, -1) + 1, "EndDate", Ending)
)
Basically, you start with the CALENDAR function to get all the days, tag each date with its corresponding month, and then summarize that table to just return one row for each month.
Since the first and last rows are a bit irregular, I prepended and appended those to a filtered version of the summarized month table to get your desired table.
Create new table as
Table = CALENDAR( DATE(2018, 5, 1), DATE(2019, 1, 5) - 1)
Rename the auto-generated column "Date" to "Start Date". Then add a new column as
End Date = 'Table'[Start Date] + 1
I have an app that runs on multiple computers and must synchronize data between its internal database and a database on SQL Server.
I use temporary tables to load the internal database's data and then call an SP to synchronize it: the SP processes the data row by row and either updates rows in the SQL database, inserts new rows, or deletes dropped rows. Since I have to support customers that still run SQL Server 2000, I need a solution other than MERGE.
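A minimal sketch of that SQL Server 2000 compatible update-then-insert pattern (MyTable, KeyCol and ValueCol are placeholder names, not the real schema; the full procedure below applies the same idea to P, C1 and C2):
-- placeholder names, for illustration only
DECLARE @Key int, @Value nvarchar(255);
-- ... @Key / @Value set from the staging row being processed ...
UPDATE MyTable SET ValueCol = @Value WHERE KeyCol = @Key;
IF @@ROWCOUNT = 0
    INSERT INTO MyTable ( KeyCol, ValueCol ) VALUES ( @Key, @Value );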
The problem is that my SP works very well in SSMS but suddenly fails when called from my application. I use native C++ code with ODBC and SQL Native Client to connect to SQL Server.
Here are my database and SP definitions:
IF (NOT EXISTS(SELECT * FROM master.dbo.sysdatabases WHERE name='TestDB1'))
CREATE DATABASE TestDB1;
GO
USE TestDB1;
GO
IF (NOT EXISTS( SELECT * FROM dbo.sysobjects WHERE name='Servers'))
BEGIN
CREATE TABLE Servers(
[ID] uniqueidentifier NOT NULL PRIMARY KEY,
[ServerName] nvarchar(128)
-- Other fields omitted
);
END;
GO
IF (NOT EXISTS( SELECT * FROM dbo.sysobjects WHERE name='P'))
BEGIN
CREATE TABLE [dbo].[P](
[ID] bigint NOT NULL,
[ServerID] uniqueidentifier NOT NULL
CONSTRAINT KK_P_Servers FOREIGN KEY REFERENCES [Servers],
[PName] nvarchar(255) NOT NULL,
-- Other fields omitted
CONSTRAINT PK_P PRIMARY KEY CLUSTERED ([ID], [ServerID])
);
END;
GO
IF (NOT EXISTS( SELECT * FROM dbo.sysobjects WHERE name='C1'))
BEGIN
CREATE TABLE [dbo].[C1](
[ID] bigint NOT NULL,
[ServerID] uniqueidentifier NOT NULL
CONSTRAINT FK_C1_Servers FOREIGN KEY REFERENCES [Servers],
[PID] bigint NOT NULL,
[Type] nvarchar(50) NOT NULL,
-- Other fields omitted
CONSTRAINT PK_C1 PRIMARY KEY CLUSTERED ([ID], [ServerID]),
CONSTRAINT FK_C1_P
FOREIGN KEY ([PID], [ServerID]) REFERENCES [P]
);
END;
GO
IF (NOT EXISTS( SELECT * FROM dbo.sysobjects WHERE name='C2'))
BEGIN
CREATE TABLE [dbo].[C2](
[ID] bigint NOT NULL,
[ServerID] uniqueidentifier NOT NULL
CONSTRAINT FK_C2_Servers FOREIGN KEY REFERENCES [Servers],
[PID] bigint NULL,
[Name] nvarchar(255) NOT NULL,
-- Other fields omitted
CONSTRAINT PK_C2 PRIMARY KEY CLUSTERED ([ID], [ServerID]),
CONSTRAINT FK_C2_P FOREIGN KEY ([PID], [ServerID]) REFERENCES [P]
);
END;
GO
IF (NOT EXISTS( SELECT * FROM dbo.sysobjects WHERE name='debug'))
BEGIN
CREATE TABLE debug (
[id] int identity(1, 1),
[msg] nvarchar(255) NOT NULL,
[cnt] int
);
END;
GO
CREATE TABLE #C1(
[ID] bigint NOT NULL PRIMARY KEY,
[PID] bigint NOT NULL,
[Type] nvarchar(50) NOT NULL
);
GO
CREATE TABLE #C2(
[ID] bigint NOT NULL PRIMARY KEY,
[PID] bigint NOT NULL,
[Name] nvarchar(255) NOT NULL
);
GO
CREATE TABLE #P(
[ID] bigint NOT NULL PRIMARY KEY,
[PName] nvarchar(255) NOT NULL UNIQUE
-- Table have other fields that is not important here
);
GO
CREATE PROCEDURE #RegisterServer
    @ServerId uniqueidentifier,
    @ServerName nvarchar(128)
AS
BEGIN
    BEGIN TRANSACTION
    UPDATE [Servers]
    SET [ServerName]=@ServerName
    WHERE [ID]=@ServerId;
    IF @@ROWCOUNT = 0
        INSERT INTO [Servers](
            [ID], [ServerName]
        ) VALUES (
            @ServerId, @ServerName
        );
    COMMIT TRANSACTION
END
GO
CREATE PROCEDURE #DropP
    @ServerID uniqueidentifier,
    @PId bigint
AS
BEGIN
    DELETE FROM C1
    WHERE PID=@PId AND ServerID=@ServerID;
    UPDATE C2 SET PID=NULL
    WHERE PID=@PId AND ServerID=@ServerID;
    DELETE FROM P
    WHERE ID=@PId AND ServerID=@ServerID;
END
GO
CREATE PROCEDURE #SynchronizeP
    @ServerID uniqueidentifier
AS
BEGIN
    DECLARE @rc int, @e int;
    DECLARE @AllP TABLE (
        [num] bigint IDENTITY(1, 1) PRIMARY KEY,
        [ID] bigint NOT NULL,
        [PName] nvarchar(255) NOT NULL
    );
    DECLARE @AllC1 TABLE (
        [num] bigint IDENTITY(1, 1) PRIMARY KEY,
        [ID] bigint NOT NULL,
        [PID] bigint NOT NULL,
        [Type] nvarchar(50) NOT NULL
    );
    DECLARE @AllC2 TABLE (
        [num] bigint IDENTITY(1, 1) PRIMARY KEY,
        [ID] bigint NOT NULL,
        [PID] bigint NOT NULL,
        [Name] nvarchar(255) NOT NULL
    );
    DELETE FROM debug;
    INSERT INTO @AllP( [ID], [PName] )
    SELECT [ID], [PName]
    FROM #P;
    SELECT @rc = @@ROWCOUNT;
    INSERT INTO debug VALUES( 'CREATE @AllP', @rc );
    INSERT INTO @AllC1( [ID], [PID], [Type] )
    SELECT [ID], [PID], [Type]
    FROM #C1;
    SELECT @rc = @@ROWCOUNT;
    INSERT INTO debug VALUES( 'CREATE @AllC1', @rc );
    INSERT INTO @AllC2( [ID], [PID], [Name] )
    SELECT [ID], [PID], [Name]
    FROM #C2;
    SELECT @rc = @@ROWCOUNT;
    INSERT INTO debug VALUES( 'CREATE @AllC2', @rc );
    DECLARE @PCount int
    SELECT @PCount = COUNT(*) FROM @AllP
    INSERT INTO debug VALUES( 'Read count of @AllP', @PCount );
    BEGIN TRANSACTION;
    DECLARE @PId bigint, @PName nvarchar(255);
    -- find dropped c1 and delete them
    DELETE FROM [C1]
    WHERE [ServerID]=@ServerID AND ([ID] NOT IN (SELECT a.[ID] FROM @AllC1 a));
    SELECT @rc = @@ROWCOUNT;
    INSERT INTO debug VALUES( 'Delete invalid c1', @rc );
    -- find dropped c2 and abandon them
    UPDATE [C2] SET [PID]=NULL
    WHERE [ServerID]=@ServerID AND ([ID] NOT IN (SELECT a.[ID] FROM @AllC2 a));
    SELECT @rc = @@ROWCOUNT;
    INSERT INTO debug VALUES( 'Abandon invalid c2', @rc );
    -- find dropped p and delete them
    DELETE FROM [P]
    WHERE [ServerID]=@ServerID AND ([ID] NOT IN (SELECT a.[ID] FROM @AllP a));
    SELECT @rc = @@ROWCOUNT;
    INSERT INTO debug VALUES( 'Delete invalid p', @rc );
    -- insert or update server p into database
    DECLARE @p int
    SET @p = 1
    WHILE @p <= @PCount
    BEGIN
        SELECT @PId=[ID], @PName=[PName]
        FROM @AllP
        WHERE [num] = @p;
        SELECT @rc = @@ROWCOUNT;
        INSERT INTO debug VALUES( 'Select a p ' +
            CASE @PId WHEN NULL THEN 'NULL' ELSE CONVERT(nvarchar(5), @PId) END + '|' +
            CASE @PName WHEN NULL THEN 'NULL' ELSE @PName END, @rc );
        -- update or add this processor
        UPDATE dbo.[P]
        SET [PName]=@PName
        WHERE [ServerID]=@ServerID AND [ID]=@PId;
        SELECT @rc = @@ROWCOUNT;
        INSERT INTO debug VALUES( 'Update p', @rc );
        IF @rc = 0
        BEGIN
            INSERT INTO dbo.[P](
                [ID], [ServerID], [PName]
            ) VALUES(
                @PId, @ServerID, @PName
            );
            SELECT @rc = @@ROWCOUNT;
            INSERT INTO debug VALUES( 'Insert p', @rc );
        END;
        -- Now update list of c1 that belong to this processor
        DECLARE @TmpC1 TABLE (
            [num] bigint identity(1, 1) primary key,
            [ID] bigint NOT NULL,
            [Type] nvarchar(50) NOT NULL
        );
        INSERT INTO @TmpC1( [ID], [Type] )
        SELECT [ID], [Type]
        FROM @AllC1
        WHERE [PID] = @PId;
        SELECT @rc = @@ROWCOUNT;
        INSERT INTO debug VALUES( 'Create @TmpC1', @rc );
        DECLARE @Test nvarchar(4000);
        SELECT @Test = '';
        SELECT @Test = @Test + CONVERT(nvarchar(5), [ID]) + ', '
        FROM @TmpC1;
        SELECT @rc = @@ROWCOUNT;
        INSERT INTO debug VALUES( '@TmpC1: ' + @Test, @rc );
        DECLARE @C1Count int, @C1 int;
        SELECT @C1Count = COUNT(*) FROM @TmpC1;
        INSERT INTO debug VALUES( '@TmpC1.Count', @C1Count );
        SET @C1 = 1
        WHILE @C1 <= @C1Count
        BEGIN
            DECLARE @C1Id bigint, @C1Type nvarchar(50);
            SELECT @C1Id=[ID], @C1Type=[Type]
            FROM @TmpC1
            WHERE [num] = @C1;
            SELECT @rc = @@ROWCOUNT;
            INSERT INTO debug VALUES( 'Read c1: ' +
                CASE @C1Id WHEN NULL THEN 'NULL' ELSE CONVERT(nvarchar(5), @C1Id) END + '|' +
                CASE @C1Type WHEN NULL THEN 'NULL' ELSE @C1Type END, @rc );
            UPDATE C1
            SET [PID]=@PId, [Type]=@C1Type
            WHERE [ID]=@C1Id AND [ServerID]=@ServerID;
            SELECT @rc = @@ROWCOUNT;
            INSERT INTO debug VALUES( 'Update c1', @rc );
            IF @rc = 0
            BEGIN
                INSERT INTO C1(
                    [ID], [ServerID], [PID], [Type]
                ) VALUES (
                    @C1Id, @ServerID, @PId, @C1Type
                );
                SELECT @rc = @@ROWCOUNT;
                INSERT INTO debug VALUES( 'Insert c1', @rc );
            END;
            SET @C1 = @C1 + 1;
        END;
        DELETE FROM @TmpC1;
        -- And at last insert or update c2 of this processor
        DECLARE @TmpC2 TABLE (
            [num] bigint identity(1, 1) primary key,
            [ID] bigint NOT NULL,
            [Name] nvarchar(255) NOT NULL
        );
        INSERT INTO @TmpC2( [ID], [Name] )
        SELECT [ID], [Name]
        FROM @AllC2
        WHERE [PID] = @PId;
        SELECT @rc = @@ROWCOUNT;
        INSERT INTO debug VALUES( 'Create @TmpC2', @rc );
        SELECT @Test = '';
        SELECT @Test = @Test + CONVERT(nvarchar(5), [ID]) + ', '
        FROM @TmpC2;
        SELECT @rc = @@ROWCOUNT;
        INSERT INTO debug VALUES( '@TmpC2: ' + @Test, @rc );
        DECLARE @C2Count int, @C2 int;
        SELECT @C2Count = COUNT(*) FROM @TmpC2;
        INSERT INTO debug VALUES( '@TmpC2.Count', @C2Count );
        SET @C2 = 1
        WHILE @C2 <= @C2Count
        BEGIN
            DECLARE @C2Id bigint, @C2Name nvarchar(255);
            SELECT @C2Id=[ID], @C2Name=[Name]
            FROM @TmpC2
            WHERE [num] = @C2;
            SELECT @rc = @@ROWCOUNT;
            INSERT INTO debug VALUES( 'Read c2: ' +
                CASE @C2Id WHEN NULL THEN 'NULL' ELSE CONVERT(nvarchar(5), @C2Id) END + '|' +
                CASE @C2Name WHEN NULL THEN 'NULL' ELSE @C2Name END, @rc );
            UPDATE [C2]
            SET [PID]=@PId, [Name]=@C2Name
            WHERE [ID]=@C2Id AND ServerID=@ServerID;
            SELECT @rc = @@ROWCOUNT;
            INSERT INTO debug VALUES( 'Update c2', @rc );
            IF @rc = 0
            BEGIN
                INSERT INTO debug VALUES( 'Inserting channel: ' +
                    CONVERT(nvarchar(5), @C2Id) + '|' +
                    CONVERT(nvarchar(50), @ServerId) + '|' +
                    CONVERT(nvarchar(5), @PId), 0 );
                INSERT INTO [C2] (
                    [ID], [ServerID], [PID], [Name]
                ) VALUES (
                    @C2Id, @ServerID, @PId, @C2Name
                );
                SELECT @rc = @@ROWCOUNT;
                INSERT INTO debug VALUES( 'Insert c2', @rc );
            END;
            INSERT INTO debug VALUES( 'To next c2', @C2 );
            SET @C2 = @C2 + 1;
            INSERT INTO debug VALUES( 'Next c2', @C2 );
        END;
        DELETE FROM @TmpC2;
        SET @p = @p + 1;
    END;
    COMMIT TRANSACTION;
END
GO
Each time I execute #SynchronizeP from the C++ app I get a sudden error somewhere in the middle of the SP and the transaction fails, but executing the same code in SSMS works perfectly.
I have tried everything, but I cannot come up with an answer!
Here is the sample data that I work with:
INSERT INTO #P( [ID], [PName] ) VALUES
( 1, 'p1' ),
( 2, 'p2' ),
( 3, 'p3' )
GO
INSERT INTO #C1( [ID], [PID], [Type] ) VALUES
( 1, 1, 'T1' ),
( 2, 1, 'T2' ),
( 3, 2, 'T3' ),
( 4, 2, 'T4' ),
( 5, 3, 'T5' ),
( 6, 3, 'T6' )
GO
INSERT INTO #C2( [ID], [PID], [Name] ) VALUES
(1, 1, 'C2_01'), (2, 1, 'C2_02'), (3, 1, 'C2_03'), (4, 1, 'C2_04'), (5, 1, 'C2_05'), (6, 1, 'C2_06'),
(7, 1, 'C2_07'), (8, 1, 'C2_08'), (9, 1, 'C2_09'), (10, 1, 'C2_10'), (11, 1, 'C2_11'), (12, 1, 'C2_12'),
(13, 1, 'C2_13'), (14, 1, 'C2_14'), (15, 1, 'C2_15'), (16, 1, 'C2_16'), (17, 1, 'C2_17'), (18, 1, 'C2_18'),
(19, 1, 'C2_19'), (20, 1, 'C2_20'), (21, 1, 'C2_21'), (22, 1, 'C2_22'), (23, 1, 'C2_23'), (24, 1, 'C2_24'),
(25, 1, 'C2_25'), (26, 1, 'C2_26'), (27, 1, 'C2_27'), (28, 1, 'C2_28'), (29, 1, 'C2_29'), (30, 1, 'C2_30'),
(31, 2, 'C2_31'), (32, 2, 'C2_32'), (33, 2, 'C2_33'), (34, 2, 'C2_34'), (35, 2, 'C2_35'), (36, 2, 'C2_36'),
(37, 2, 'C2_37'), (38, 2, 'C2_38'), (39, 2, 'C2_39'), (40, 2, 'C2_40'), (41, 2, 'C2_41'), (42, 2, 'C2_42'),
(43, 2, 'C2_43'), (44, 2, 'C2_44'), (45, 2, 'C2_45'), (46, 2, 'C2_46'), (47, 2, 'C2_47'), (48, 2, 'C2_48'),
(49, 2, 'C2_49'), (50, 2, 'C2_50'), (51, 2, 'C2_51'), (52, 2, 'C2_52'), (53, 2, 'C2_53'), (54, 2, 'C2_54'),
(55, 2, 'C2_55'), (56, 2, 'C2_56'), (57, 2, 'C2_57'), (58, 2, 'C2_58'), (59, 2, 'C2_59'), (60, 2, 'C2_60'),
(61, 3, 'C2_61'), (62, 3, 'C2_62'), (63, 3, 'C2_63'), (64, 3, 'C2_64'), (65, 3, 'C2_65'), (66, 3, 'C2_66'),
(67, 3, 'C2_67'), (68, 3, 'C2_68'), (69, 3, 'C2_69'), (70, 3, 'C2_70'), (71, 3, 'C2_71'), (72, 3, 'C2_72'),
(73, 3, 'C2_73'), (74, 3, 'C2_74'), (75, 3, 'C2_75'), (76, 3, 'C2_76'), (77, 3, 'C2_77'), (78, 3, 'C2_78'),
(79, 3, 'C2_79'), (80, 3, 'C2_80'), (81, 3, 'C2_81'), (82, 3, 'C2_82'), (83, 3, 'C2_83'), (84, 3, 'C2_84'),
(85, 3, 'C2_85'), (86, 3, 'C2_86'), (87, 3, 'C2_87'), (88, 3, 'C2_88'), (89, 3, 'C2_89'), (90, 3, 'C2_90')
GO
EXEC #SynchronizeP
GO
Edit: Oh my God!! I can't believe it: I added SET NOCOUNT ON at the start of my SP and everything works as expected!! Does anyone know why? Why does a message that indicates the count of affected rows break execution of my SP?
I know that in most cases it is a good idea to add SET NOCOUNT ON at the start of an SP (for performance), but why does forgetting to add it break my SP?
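For reference, the fix described above is a single line at the top of the procedure body (a sketch; the rest of #SynchronizeP is unchanged):
CREATE PROCEDURE #SynchronizeP
    @ServerID uniqueidentifier
AS
BEGIN
    SET NOCOUNT ON; -- stop sending 'n rows affected' messages to the client after every statement
    -- ... rest of the procedure body as above ...
END
GO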
By prefixing your SP with a #, you have made it temporary, so it probably doesn't exist when you call it from a different session in your C++ program.
I think the answer is that ODBC will close or cancel the command when it receives the first answer from SQL Server, so if I forget to use SET NOCOUNT ON and SQL Server sends row-count notifications, ODBC will cancel the command. Maybe there is some technique to enable multiple result sets for a SQL command in ODBC, but I don't know of such a technique.