convert from sql to presto - amazon-athena

This is what I converted it to, but I am getting the error below:
line 8:8: mismatched input 'else'. Expecting: ','
SELECT
  regdate,
  date_format(regdate, '%Y-%m') as YearMonth,
  date_format(regdate, '%m') as RegMonth,
  date_format(regdate, '%Y') as RegYear,
  case when cast(year(RegDate) as varchar)
       and case when len(cast(month(RegDate) as varchar)) = 1
           then concat('0', cast(month(RegDate) as varchar)
           else cast(month(RegDate) as varchar)
       end < concat('20', code, '01') then 'Jan'
       when cast(year(RegDate) as varchar) concat(
       case when len(cast(month(RegDate) as varchar)) = 1
           then concat('0', cast(month(RegDate) as varchar)
           else cast(month(RegDate) as varchar)
       end < concat('20', code, '12') then 'Dec'
       else Convert(varchar(3), RegDate, 0)
  end EditionMonth
FROM
details
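The parse error comes from the concat( that is opened after then but never closed before else; on top of that, len() and CONVERT() are T-SQL, not Presto. The whole zero-padding exercise is only rebuilding a sortable 'YYYYMM' string, which date_format can emit directly. A sketch of the probable intent (assuming code holds a two-character year suffix such as '23'):
SELECT
  regdate,
  date_format(regdate, '%Y-%m') AS YearMonth,
  date_format(regdate, '%m') AS RegMonth,
  date_format(regdate, '%Y') AS RegYear,
  CASE
    WHEN date_format(regdate, '%Y%m') < concat('20', code, '01') THEN 'Jan'
    WHEN date_format(regdate, '%Y%m') < concat('20', code, '12') THEN 'Dec'
    -- '%b' yields the abbreviated month name, which is what
    -- CONVERT(varchar(3), RegDate, 0) extracted in the original T-SQL
    ELSE date_format(regdate, '%b')
  END AS EditionMonth
FROM details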


PowerBI RANKX is not continuous

I want to show the TOP 10 differences in a measure.
The difference is calculated as YTD actuals + rest-of-year forecast - full-year budget.
The normal measure looks like this:
VAR _Year =
SELECTEDVALUE ( 'Calendar'[Year] )
RETURN
(
CALCULATE (
SELECTEDMEASURE (),
DATESYTD ( Calendar[Dates] ),
CRDB[Scenario] = "Actual",
ALL ( CRDB[ForecastTypeFinal] )
)
+ CALCULATE (
SELECTEDMEASURE (),
CRDB[Scenario] = "Forecast",
'Calendar'[Dates] >= DATE ( _Year, 1, 1 )
&& 'Calendar'[Dates] <= DATE ( _Year, 12, 31 )
)
)
- CALCULATE (
SELECTEDMEASURE (),
Calendar[Dates] >= DATE ( _Year, 1, 1 )
&& Calendar[Dates] <= DATE ( _Year, 12, 31 ),
CRDB[Scenario] = "Budget",
ALL ( CRDB[ForecastTypeFinal] )
)
I would like to rank by project, so I made this ranking measure:
RANKX (
ALL ( CRDB[Project ID - Project ID Level 01 (Text)] ),
(
CALCULATE (
SELECTEDMEASURE (),
DATESYTD ( Calendar[Dates] ),
CRDB[Scenario] = "Actual",
ALL ( CRDB[ForecastTypeFinal] )
)
+ CALCULATE (
SELECTEDMEASURE (),
CRDB[Scenario] = "Forecast",
'Calendar'[Dates] >= DATE ( SELECTEDVALUE ( 'Calendar'[Year] ), 1, 1 )
&& 'Calendar'[Dates] <= DATE ( SELECTEDVALUE ( 'Calendar'[Year] ), 12, 31 )
)
)
- CALCULATE (
SELECTEDMEASURE (),
Calendar[Dates] >= DATE ( SELECTEDVALUE ( 'Calendar'[Year] ), 1, 1 )
&& Calendar[Dates] <= DATE ( SELECTEDVALUE ( 'Calendar'[Year] ), 12, 31 ),
CRDB[Scenario] = "Budget",
ALL ( CRDB[ForecastTypeFinal] )
),
,
DESC
)
The ranking gets me the correct projects, but when I look at the rank values, they are not right:
[screenshot: ranking values]
The values are not the same, so there is no reason for ranks to be skipped.
What am I doing wrong?
Thank you in advance for your help.

How to export BigQuery table schema as DDL

I need to create a BigQuery table with the same schema as an existing one.
In standard MySQL there is SHOW CREATE TABLE; is there something similar for BigQuery?
The ddl column of INFORMATION_SCHEMA.TABLES gives you exactly that:
SELECT
table_name, ddl
FROM
`bigquery-public-data`.census_bureau_usa.INFORMATION_SCHEMA.TABLES;
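If you only need a single table, you can filter on table_name; for example (population_by_zip_2010 is just one of the tables in that public dataset):
SELECT ddl
FROM `bigquery-public-data`.census_bureau_usa.INFORMATION_SCHEMA.TABLES
WHERE table_name = 'population_by_zip_2010';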
https://cloud.google.com/blog/topics/developers-practitioners/spring-forward-bigquery-user-friendly-sql
There is nothing similar to SHOW CREATE TABLE from MySQL, but it is possible to generate the DDL statements of the tables in a dataset with the help of UDFs.
Use the following script, and make sure to replace 'mydataset' with yours. You can even add a WHERE predicate to output only a specific table's DDL.
Copy the output for the desired table, paste it into a new Compose Query window, and give it a new table name!
CREATE TEMP FUNCTION MakePartitionByExpression(
column_name STRING, data_type STRING
) AS (
IF(
column_name = '_PARTITIONTIME',
'DATE(_PARTITIONTIME)',
IF(
data_type = 'TIMESTAMP',
CONCAT('DATE(', column_name, ')'),
column_name
)
)
);
CREATE TEMP FUNCTION MakePartitionByClause(
columns ARRAY<STRUCT<column_name STRING, data_type STRING, is_nullable STRING, is_partitioning_column STRING, clustering_ordinal_position INT64>>
) AS (
IFNULL(
CONCAT(
'PARTITION BY ',
(SELECT MakePartitionByExpression(column_name, data_type)
FROM UNNEST(columns) WHERE is_partitioning_column = 'YES'),
'\n'),
''
)
);
CREATE TEMP FUNCTION MakeClusterByClause(
columns ARRAY<STRUCT<column_name STRING, data_type STRING, is_nullable STRING, is_partitioning_column STRING, clustering_ordinal_position INT64>>
) AS (
IFNULL(
CONCAT(
'CLUSTER BY ',
(SELECT STRING_AGG(column_name, ', ' ORDER BY clustering_ordinal_position)
FROM UNNEST(columns) WHERE clustering_ordinal_position IS NOT NULL),
'\n'
),
''
)
);
CREATE TEMP FUNCTION MakeNullable(data_type STRING, is_nullable STRING)
AS (
IF(not STARTS_WITH(data_type, 'ARRAY<') and is_nullable = 'NO', ' NOT NULL', '')
);
CREATE TEMP FUNCTION MakeColumnList(
columns ARRAY<STRUCT<column_name STRING, data_type STRING, is_nullable STRING, is_partitioning_column STRING, clustering_ordinal_position INT64>>
) AS (
IFNULL(
CONCAT(
'(\n',
(SELECT STRING_AGG(CONCAT(' ', column_name, ' ', data_type, MakeNullable(data_type, is_nullable)), ',\n')
FROM UNNEST(columns)),
'\n)\n'
),
''
)
);
CREATE TEMP FUNCTION MakeOptionList(
options ARRAY<STRUCT<option_name STRING, option_value STRING>>
) AS (
IFNULL(
CONCAT(
'OPTIONS (\n',
(SELECT STRING_AGG(CONCAT(' ', option_name, '=', option_value), ',\n') FROM UNNEST(options)),
'\n)\n'),
''
)
);
WITH Components AS (
SELECT
CONCAT('`', table_catalog, '.', table_schema, '.', table_name, '`') AS table_name,
ARRAY_AGG(
STRUCT(column_name, data_type, is_nullable, is_partitioning_column, clustering_ordinal_position)
ORDER BY ordinal_position
) AS columns,
(SELECT ARRAY_AGG(STRUCT(option_name, option_value))
FROM mydataset.INFORMATION_SCHEMA.TABLE_OPTIONS AS t2
WHERE t.table_name = t2.table_name) AS options
FROM mydataset.INFORMATION_SCHEMA.TABLES AS t
LEFT JOIN mydataset.INFORMATION_SCHEMA.COLUMNS
USING (table_catalog, table_schema, table_name)
WHERE table_type = 'BASE TABLE'
GROUP BY table_catalog, table_schema, t.table_name
)
SELECT
CONCAT(
'CREATE OR REPLACE TABLE ',
table_name,
'\n',
MakeColumnList(columns),
MakePartitionByClause(columns),
MakeClusterByClause(columns),
MakeOptionList(options))
FROM Components
For more info, check Getting table metadata using INFORMATION_SCHEMA: https://cloud.google.com/bigquery/docs/information-schema-tables
... to create a BigQuery table with the same schema as an existing one
You can use the "trick" below with your new table as the destination (the trick is in using WHERE FALSE, which makes the query below free of cost, outputs 0 rows, and still preserves the schema):
#standardSQL
SELECT *
FROM `project.dataset.existing_table`
WHERE FALSE
Or you can use the above statement in a CTAS (CREATE TABLE AS SELECT) type of DDL.
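A minimal sketch (project, dataset, and table names are placeholders):
#standardSQL
CREATE TABLE `project.dataset.new_table` AS
SELECT *
FROM `project.dataset.existing_table`
WHERE FALSE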

Create a new column and have it display in number values

I have one file which has the following columns: Document Date and Disposition Date.
In Power BI Desktop, I'd like to create another column called 'Duration', calculated as Disposition Date - Document Date, and I want the new column to display as a number, since Disposition Date and Document Date are each in either serial-number (e.g. 39448) or date (e.g. 09/25/2018) format.
Is there code or something to do this? Thank you!
I may be missing the point here, but if you have a dataset such as this:
Document Disposition
25.09.2018 22.09.2018
24.09.2018 21.09.2018
23.09.2018 20.09.2018
22.09.2018 19.09.2018
21.09.2018 18.09.2018
20.09.2018 17.09.2018
19.09.2018 16.09.2018
18.09.2018 14.09.2018
17.09.2018 13.09.2018
16.09.2018 12.09.2018
15.09.2018 11.09.2018
14.09.2018 10.09.2018
13.09.2018 09.09.2018
12.09.2018 08.09.2018
11.09.2018 07.09.2018
10.09.2018 06.09.2018
09.09.2018 05.09.2018
08.09.2018 04.09.2018
Then you can load them using Get Data, go to Edit Queries, select Add Column, and simply set it up like this:
[screenshot: Add Column setup]
Then you can click the ABC / 123 icon on top of the column and change the data type to Whole Number, and you'll get this:
[screenshot: resulting whole-number column]
Please let me know if this is not what you were looking for.
First, create two new date columns for document and disposition, since there are some variances in data type. I am basically just checking whether, after conversion, there is a "/" in the date field, implying it is a date type; if not, I assume it is serialized and convert it. The following DAX should do it, BUT it is not tested, so try it out.
True Document Date :=
SWITCH (
TRUE (),
AND (
ISERROR ( SEARCH ( "/", FORMAT ( [Document], "text" ) ) ),
[Document] >= 32767
), FORMAT ( DATE ( 2000, 1, [Document] - 36523 ), "YYYY-MM-DD" ),
AND (
ISERROR ( SEARCH ( "/", FORMAT ( [Document], "text" ) ) ),
[Document] < 32767
), FORMAT ( DATE ( 1900, 1, [Document] ), "YYYY-MM-DD" ),
NOT ( ISERROR ( SEARCH ( "/", FORMAT ( [Document], "text" ) ) ) ), [Document]
)
True Disposition Date :=
SWITCH (
TRUE (),
AND (
ISERROR ( SEARCH ( "/", FORMAT ( [Disposition], "text" ) ) ),
[Disposition] >= 32767
), FORMAT ( DATE ( 2000, 1, [Disposition] - 36523 ), "YYYY-MM-DD" ),
AND (
ISERROR ( SEARCH ( "/", FORMAT ( [Disposition], "text" ) ) ),
[Disposition] < 32767
), FORMAT ( DATE ( 1900, 1, [Disposition] ), "YYYY-MM-DD" ),
NOT ( ISERROR ( SEARCH ( "/", FORMAT ( [Disposition], "text" ) ) ) ), [Disposition]
)
Then just take the difference in days and store the result in a new calculated column:
Date Delta :=
DATEDIFF ( [True Document Date], [True Disposition Date], DAY )

SQL SP works correctly in SSMS but fails in VC++ application

I have an app that runs on multiple computers and must synchronize data between its internal database and a database on SQL Server.
I use some temporary tables to insert the internal database's data and then call an SP to synchronize the data; it processes the data row by row and then either updates rows in the SQL database, inserts new rows, or deletes dropped rows. Since I must support customers that have SQL Server 2000, I need a solution other than MERGE.
The problem is that my SP works very well in SSMS but suddenly fails when called from my application; I use native C++ code with ODBC and SQL Native Client for the connection to SQL Server.
Here is my database and SP definition:
IF (NOT EXISTS(SELECT * FROM master.dbo.sysdatabases WHERE name='TestDB1'))
CREATE DATABASE TestDB1;
GO
USE TestDB1;
GO
IF (NOT EXISTS( SELECT * FROM dbo.sysobjects WHERE name='Servers'))
BEGIN
CREATE TABLE Servers(
[ID] uniqueidentifier NOT NULL PRIMARY KEY,
[ServerName] nvarchar(50)
-- Other fields omitted
);
END;
GO
IF (NOT EXISTS( SELECT * FROM dbo.sysobjects WHERE name='P'))
BEGIN
CREATE TABLE [dbo].[P](
[ID] bigint NOT NULL,
[ServerID] uniqueidentifier NOT NULL
CONSTRAINT KK_P_Servers FOREIGN KEY REFERENCES [Servers],
[PName] nvarchar(255) NOT NULL,
-- Other fields omitted
CONSTRAINT PK_P PRIMARY KEY CLUSTERED ([ID], [ServerID])
);
END;
GO
IF (NOT EXISTS( SELECT * FROM dbo.sysobjects WHERE name='C1'))
BEGIN
CREATE TABLE [dbo].[C1](
[ID] bigint NOT NULL,
[ServerID] uniqueidentifier NOT NULL
CONSTRAINT FK_C1_Servers FOREIGN KEY REFERENCES [Servers],
[PID] bigint NOT NULL,
[Type] nvarchar(50) NOT NULL,
-- Other fields omitted
CONSTRAINT PK_C1 PRIMARY KEY CLUSTERED ([ID], [ServerID]),
CONSTRAINT FK_C1_P
FOREIGN KEY ([PID], [ServerID]) REFERENCES [P]
);
END;
GO
IF (NOT EXISTS( SELECT * FROM dbo.sysobjects WHERE name='C2'))
BEGIN
CREATE TABLE [dbo].[C2](
[ID] bigint NOT NULL,
[ServerID] uniqueidentifier NOT NULL
CONSTRAINT FK_C2_Servers FOREIGN KEY REFERENCES [Servers],
[PID] bigint NULL,
[Name] nvarchar(255) NOT NULL,
-- Other fields omitted
CONSTRAINT PK_C2 PRIMARY KEY CLUSTERED ([ID], [ServerID]),
CONSTRAINT FK_C2_P FOREIGN KEY ([PID], [ServerID]) REFERENCES [P]
);
END;
GO
IF (NOT EXISTS( SELECT * FROM dbo.sysobjects WHERE name='debug'))
BEGIN
CREATE TABLE debug (
[id] int identity(1, 1),
[msg] nvarchar(255) NOT NULL,
[cnt] int
);
END;
GO
CREATE TABLE #C1(
[ID] bigint NOT NULL PRIMARY KEY,
[PID] bigint NOT NULL,
[Type] nvarchar(50) NOT NULL
);
GO
CREATE TABLE #C2(
[ID] bigint NOT NULL PRIMARY KEY,
[PID] bigint NOT NULL,
[Name] nvarchar(255) NOT NULL
);
GO
CREATE TABLE #P(
[ID] bigint NOT NULL PRIMARY KEY,
[PName] nvarchar(255) NOT NULL UNIQUE
-- Table have other fields that is not important here
);
GO
CREATE PROCEDURE #RegisterServer
    @ServerId uniqueidentifier,
    @ServerName nvarchar(128)
AS
BEGIN
    BEGIN TRANSACTION
    UPDATE [Servers]
    SET [ServerName]=@ServerName
    WHERE [ID]=@ServerId;
    IF @@ROWCOUNT = 0
        INSERT INTO [Servers](
            [ID], [ServerName]
        ) VALUES (
            @ServerId, @ServerName
        );
    COMMIT TRANSACTION
END
GO
CREATE PROCEDURE #DropP
    @ServerID uniqueidentifier,
    @PId bigint
AS
BEGIN
    DELETE FROM C1
    WHERE PID=@PId AND ServerID=@ServerID;
    UPDATE C2 SET PID=NULL
    WHERE PID=@PId AND ServerID=@ServerID;
    DELETE FROM P
    WHERE ID=@PId AND ServerID=@ServerID;
END
GO
CREATE PROCEDURE #SynchronizeP
    @ServerID uniqueidentifier
AS
BEGIN
    DECLARE @rc int, @e int;
    DECLARE @AllP TABLE (
        [num] bigint IDENTITY(1, 1) PRIMARY KEY,
        [ID] bigint NOT NULL,
        [PName] nvarchar(255) NOT NULL
    );
    DECLARE @AllC1 TABLE (
        [num] bigint IDENTITY(1, 1) PRIMARY KEY,
        [ID] bigint NOT NULL,
        [PID] bigint NOT NULL,
        [Type] nvarchar(50) NOT NULL
    );
    DECLARE @AllC2 TABLE (
        [num] bigint IDENTITY(1, 1) PRIMARY KEY,
        [ID] bigint NOT NULL,
        [PID] bigint NOT NULL,
        [Name] nvarchar(255) NOT NULL
    );
    DELETE FROM debug;
    INSERT INTO @AllP( [ID], [PName] )
    SELECT [ID], [PName]
    FROM #P;
    SELECT @rc = @@ROWCOUNT;
    INSERT INTO debug VALUES( 'CREATE @AllP', @rc );
    INSERT INTO @AllC1( [ID], [PID], [Type] )
    SELECT [ID], [PID], [Type]
    FROM #C1;
    SELECT @rc = @@ROWCOUNT;
    INSERT INTO debug VALUES( 'CREATE @AllC1', @rc );
    INSERT INTO @AllC2( [ID], [PID], [Name] )
    SELECT [ID], [PID], [Name]
    FROM #C2;
    SELECT @rc = @@ROWCOUNT;
    INSERT INTO debug VALUES( 'CREATE @AllC2', @rc );
    DECLARE @PCount int
    SELECT @PCount = COUNT(*) FROM @AllP
    INSERT INTO debug VALUES( 'Read count of @AllP', @PCount );
    BEGIN TRANSACTION;
    DECLARE @PId bigint, @PName nvarchar(255);
    -- find dropped c1 and delete them
    DELETE FROM [C1]
    WHERE [ServerID]=@ServerID AND ([ID] NOT IN (SELECT a.[ID] FROM @AllC1 a));
    SELECT @rc = @@ROWCOUNT;
    INSERT INTO debug VALUES( 'Delete invalid c1', @rc );
    -- find dropped c2 and abandon them
    UPDATE [C2] SET [PID]=NULL
    WHERE [ServerID]=@ServerID AND ([ID] NOT IN (SELECT a.[ID] FROM @AllC2 a));
    SELECT @rc = @@ROWCOUNT;
    INSERT INTO debug VALUES( 'Abandon invalid c2', @rc );
    -- find dropped p and delete them
    DELETE FROM [P]
    WHERE [ServerID]=@ServerID AND ([ID] NOT IN (SELECT a.[ID] FROM @AllP a));
    SELECT @rc = @@ROWCOUNT;
    INSERT INTO debug VALUES( 'Delete invalid p', @rc );
    -- insert or update server p into database
    DECLARE @p int
    SET @p = 1
    WHILE @p <= @PCount
    BEGIN
        SELECT @PId=[ID], @PName=[PName]
        FROM @AllP
        WHERE [num] = @p;
        SELECT @rc = @@ROWCOUNT;
        INSERT INTO debug VALUES( 'Select a p ' +
            CASE @PId WHEN NULL THEN 'NULL' ELSE CONVERT(nvarchar(5), @PId) END + '|' +
            CASE @PName WHEN NULL THEN 'NULL' ELSE @PName END, @rc );
        -- update or add this processor
        UPDATE dbo.[P]
        SET [PName]=@PName
        WHERE [ServerID]=@ServerID AND [ID]=@PId;
        SELECT @rc = @@ROWCOUNT;
        INSERT INTO debug VALUES( 'Update p', @rc );
        IF @rc = 0
        BEGIN
            INSERT INTO dbo.[P](
                [ID], [ServerID], [PName]
            ) VALUES(
                @PId, @ServerID, @PName
            );
            SELECT @rc = @@ROWCOUNT;
            INSERT INTO debug VALUES( 'Insert p', @rc );
        END;
        -- Now update list of c1 that belong to this processor
        DECLARE @TmpC1 TABLE (
            [num] bigint identity(1, 1) primary key,
            [ID] bigint NOT NULL,
            [Type] nvarchar(50) NOT NULL
        );
        INSERT INTO @TmpC1( [ID], [Type] )
        SELECT [ID], [Type]
        FROM @AllC1
        WHERE [PID] = @PId;
        SELECT @rc = @@ROWCOUNT;
        INSERT INTO debug VALUES( 'Create @TmpC1', @rc );
        DECLARE @Test nvarchar(4000);
        SELECT @Test = '';
        SELECT @Test = @Test + CONVERT(nvarchar(5), [ID]) + ', '
        FROM @TmpC1;
        SELECT @rc = @@ROWCOUNT;
        INSERT INTO debug VALUES( '@TmpC1: ' + @Test, @rc );
        DECLARE @C1Count int, @C1 int;
        SELECT @C1Count = COUNT(*) FROM @TmpC1;
        INSERT INTO debug VALUES( '@TmpC1.Count', @C1Count );
        SET @C1 = 1
        WHILE @C1 <= @C1Count
        BEGIN
            DECLARE @C1Id bigint, @C1Type nvarchar(50);
            SELECT @C1Id=[ID], @C1Type=[Type]
            FROM @TmpC1
            WHERE [num] = @C1;
            SELECT @rc = @@ROWCOUNT;
            INSERT INTO debug VALUES( 'Read c1: ' +
                CASE @C1Id WHEN NULL THEN 'NULL' ELSE CONVERT(nvarchar(5), @C1Id) END + '|' +
                CASE @C1Type WHEN NULL THEN 'NULL' ELSE @C1Type END, @rc );
            UPDATE C1
            SET [PID]=@PId, [Type]=@C1Type
            WHERE [ID]=@C1Id AND [ServerID]=@ServerID;
            SELECT @rc = @@ROWCOUNT;
            INSERT INTO debug VALUES( 'Update c1', @rc );
            IF @rc = 0
            BEGIN
                INSERT INTO C1(
                    [ID], [ServerID], [PID], [Type]
                ) VALUES (
                    @C1Id, @ServerID, @PId, @C1Type
                );
                SELECT @rc = @@ROWCOUNT;
                INSERT INTO debug VALUES( 'Insert c1', @rc );
            END;
            SET @C1 = @C1 + 1;
        END;
        DELETE FROM @TmpC1;
        -- And at last insert or update c2 of this processor
        DECLARE @TmpC2 TABLE (
            [num] bigint identity(1, 1) primary key,
            [ID] bigint NOT NULL,
            [Name] nvarchar(255) NOT NULL
        );
        INSERT INTO @TmpC2( [ID], [Name] )
        SELECT [ID], [Name]
        FROM @AllC2
        WHERE [PID] = @PId;
        SELECT @rc = @@ROWCOUNT;
        INSERT INTO debug VALUES( 'Create @TmpC2', @rc );
        SELECT @Test = '';
        SELECT @Test = @Test + CONVERT(nvarchar(5), [ID]) + ', '
        FROM @TmpC2;
        SELECT @rc = @@ROWCOUNT;
        INSERT INTO debug VALUES( '@TmpC2: ' + @Test, @rc );
        DECLARE @C2Count int, @C2 int;
        SELECT @C2Count = COUNT(*) FROM @TmpC2;
        INSERT INTO debug VALUES( '@TmpC2.Count', @C2Count );
        SET @C2 = 1
        WHILE @C2 <= @C2Count
        BEGIN
            DECLARE @C2Id bigint, @C2Name nvarchar(255);
            SELECT @C2Id=[ID], @C2Name=[Name]
            FROM @TmpC2
            WHERE [num] = @C2;
            SELECT @rc = @@ROWCOUNT;
            INSERT INTO debug VALUES( 'Read c2: ' +
                CASE @C2Id WHEN NULL THEN 'NULL' ELSE CONVERT(nvarchar(5), @C2Id) END + '|' +
                CASE @C2Name WHEN NULL THEN 'NULL' ELSE @C2Name END, @rc );
            UPDATE [C2]
            SET [PID]=@PId, [Name]=@C2Name
            WHERE [ID]=@C2Id AND ServerID=@ServerID;
            SELECT @rc = @@ROWCOUNT;
            INSERT INTO debug VALUES( 'Update c2', @rc );
            IF @rc = 0
            BEGIN
                INSERT INTO debug VALUES( 'Inserting channel: ' +
                    CONVERT(nvarchar(5), @C2Id) + '|' +
                    CONVERT(nvarchar(50), @ServerID) + '|' +
                    CONVERT(nvarchar(5), @PId), 0 );
                INSERT INTO [C2] (
                    [ID], [ServerID], [PID], [Name]
                ) VALUES (
                    @C2Id, @ServerID, @PId, @C2Name
                );
                SELECT @rc = @@ROWCOUNT;
                INSERT INTO debug VALUES( 'Insert c2', @rc );
            END;
            INSERT INTO debug VALUES( 'To next c2', @C2 );
            SET @C2 = @C2 + 1;
            INSERT INTO debug VALUES( 'Next c2', @C2 );
        END;
        DELETE FROM @TmpC2;
        SET @p = @p + 1;
    END;
    COMMIT TRANSACTION;
END
GO
Each time I execute #SynchronizeP from the C++ app I get a sudden error somewhere in the middle of the SP and the transaction fails, but executing the code in SSMS works perfectly.
I have tried everything, but I cannot come up with an answer!
Here is the sample data that I work with:
INSERT INTO #P( [ID], [PName] ) VALUES
( 1, 'p1' ),
( 2, 'p2' ),
( 3, 'p3' )
GO
INSERT INTO #C1( [ID], [PID], [Type] ) VALUES
( 1, 1, 'T1' ),
( 2, 1, 'T2' ),
( 3, 2, 'T3' ),
( 4, 2, 'T4' ),
( 5, 3, 'T5' ),
( 6, 3, 'T6' )
GO
INSERT INTO #C2( [ID], [PID], [Name] ) VALUES
( 1, 1, 'C2_01' ),
( 2, 1, 'C2_02' ),
( 3, 1, 'C2_03' ),
( 4, 1, 'C2_04' ),
( 5, 1, 'C2_05' ),
( 6, 1, 'C2_06' ),
( 7, 1, 'C2_07' ),
( 8, 1, 'C2_08' ),
( 9, 1, 'C2_09' ),
(10, 1, 'C2_10' ),
(11, 1, 'C2_11' ),
(12, 1, 'C2_12' ),
(13, 1, 'C2_13' ),
(14, 1, 'C2_14' ),
(15, 1, 'C2_15' ),
(16, 1, 'C2_16' ),
(17, 1, 'C2_17' ),
(18, 1, 'C2_18' ),
(19, 1, 'C2_19' ),
(20, 1, 'C2_20' ),
(21, 1, 'C2_21' ),
(22, 1, 'C2_22' ),
(23, 1, 'C2_23' ),
(24, 1, 'C2_24' ),
(25, 1, 'C2_25' ),
(26, 1, 'C2_26' ),
(27, 1, 'C2_27' ),
(28, 1, 'C2_28' ),
(29, 1, 'C2_29' ),
(30, 1, 'C2_30' ),
(31, 2, 'C2_31' ),
(32, 2, 'C2_32' ),
(33, 2, 'C2_33' ),
(34, 2, 'C2_34' ),
(35, 2, 'C2_35' ),
(36, 2, 'C2_36' ),
(37, 2, 'C2_37' ),
(38, 2, 'C2_38' ),
(39, 2, 'C2_39' ),
(40, 2, 'C2_40' ),
(41, 2, 'C2_41' ),
(42, 2, 'C2_42' ),
(43, 2, 'C2_43' ),
(44, 2, 'C2_44' ),
(45, 2, 'C2_45' ),
(46, 2, 'C2_46' ),
(47, 2, 'C2_47' ),
(48, 2, 'C2_48' ),
(49, 2, 'C2_49' ),
(50, 2, 'C2_50' ),
(51, 2, 'C2_51' ),
(52, 2, 'C2_52' ),
(53, 2, 'C2_53' ),
(54, 2, 'C2_54' ),
(55, 2, 'C2_55' ),
(56, 2, 'C2_56' ),
(57, 2, 'C2_57' ),
(58, 2, 'C2_58' ),
(59, 2, 'C2_59' ),
(60, 2, 'C2_60' ),
(61, 3, 'C2_61' ),
(62, 3, 'C2_62' ),
(63, 3, 'C2_63' ),
(64, 3, 'C2_64' ),
(65, 3, 'C2_65' ),
(66, 3, 'C2_66' ),
(67, 3, 'C2_67' ),
(68, 3, 'C2_68' ),
(69, 3, 'C2_69' ),
(70, 3, 'C2_70' ),
(71, 3, 'C2_71' ),
(72, 3, 'C2_72' ),
(73, 3, 'C2_73' ),
(74, 3, 'C2_74' ),
(75, 3, 'C2_75' ),
(76, 3, 'C2_76' ),
(77, 3, 'C2_77' ),
(78, 3, 'C2_78' ),
(79, 3, 'C2_79' ),
(80, 3, 'C2_80' ),
(81, 3, 'C2_81' ),
(82, 3, 'C2_82' ),
(83, 3, 'C2_83' ),
(84, 3, 'C2_84' ),
(85, 3, 'C2_85' ),
(86, 3, 'C2_86' ),
(87, 3, 'C2_87' ),
(88, 3, 'C2_88' ),
(89, 3, 'C2_89' ),
(90, 3, 'C2_90' )
GO
EXEC #SynchronizeP
GO
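As posted, that final EXEC omits the required @ServerID argument, and a matching row must exist in Servers for the foreign keys to be satisfied; a hypothetical invocation (the GUID is a placeholder) would be:
EXEC #RegisterServer '11111111-1111-1111-1111-111111111111', N'server1';
EXEC #SynchronizeP '11111111-1111-1111-1111-111111111111';
GO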
Edit: Oh my God!! I can't believe it: I added SET NOCOUNT ON at the start of my SP and everything goes as expected!! Does anyone know why? Why does a message that indicates the count of affected rows break execution of my SP?
I know that in most cases it is a good idea to add SET NOCOUNT ON at the start of an SP (for performance), but why does forgetting to add it break my SP?
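A minimal sketch of that change, applied to the procedure header shown above:
CREATE PROCEDURE #SynchronizeP
    @ServerID uniqueidentifier
AS
BEGIN
    SET NOCOUNT ON; -- suppress the 'N rows affected' messages sent after every statement
    -- ... rest of the procedure body unchanged ...
END
GO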
By prefixing your SP with a #, you have made it temporary. So it probably doesn't exist when you call it from a different session in your C++ program
I think the answer is that ODBC will close or cancel the command when it receives the first answer from SQL Server, so if I forget to use SET NOCOUNT ON and SQL Server sends count notifications, ODBC will cancel the command. Maybe there is some technique to enable multiple result sets for a SQL command in ODBC, but I don't know of such a technique.

conditional group by with query api

Suppose a student attendance system.
For a student and a course, we have an N:M relation named attandance.
We also have a model with attendance statuses (present, absent, justified, ...).
level( id, name, ... )
student ( id, name, ..., id_level )
course( id, name, ... )
status ( id, name, ...) # present, absent, justified, ...
attandance( id, id_student, id_course, id_status, date, hour )
unique_together = ((id_student, id_course, id_status, date, hour),)
I'm looking for a list of students with >20% absences for a level, sorted by %. Something like:
present = status.objects.get( name = 'present')
justified = status.objects.get( name = 'justified')
absent = status.objects.get( name = 'absent')
#here the question. How to do this:
Student.objects.filter( level = level ).annotate(
nPresent =count( attandence where status is present or justified ),
nAbsent =count( attandence where status is absent ),
pct = nAbsent / (nAbsent + nPresent ),
).filter( pct__gte = 20 ).order_by( "-pct" )
If it is not possible to do it with the query API, any workaround (lists, sets, dictionaries, ...) is welcome!
Thanks!
---- At this time I have a dirty raw SQL written by hand --------------------------
select
a.id_alumne,
coalesce ( count( p.id_control_assistencia ), 0 ) as p,
coalesce ( count( j.id_control_assistencia ), 0 ) as j,
coalesce ( count( f.id_control_assistencia ), 0 ) as f,
1.0 * coalesce ( count( f.id_control_assistencia ), 0 ) /
( coalesce ( count( p.id_control_assistencia ), 0 ) + coalesce ( count( f.id_control_assistencia ), 0 ) ) as tpc
from
alumne a
inner join
grup g
on (g.id_grup = a.id_grup )
inner join
curs c
on (c.id_curs = g.id_curs)
inner join
nivell n
on (n.id_nivell = c.id_nivell)
inner join
control_assistencia ca
on (ca.id_estat is not null and
ca.id_alumne = a.id_alumne )
inner join
impartir i
on ( i.id_impartir = ca.id_impartir )
left outer join
control_assistencia p
on (
p.id_estat in ( select id_estat from estat_control_assistencia where codi_estat in ('P','R' ) ) and
p.id_control_assistencia = ca.id_control_assistencia )
left outer join
control_assistencia j
on (
j.id_estat = ( select id_estat from estat_control_assistencia where codi_estat = 'J' ) and
j.id_control_assistencia = ca.id_control_assistencia )
left outer join
control_assistencia f
on (
f.id_estat = ( select id_estat from estat_control_assistencia where codi_estat = 'F' ) and
f.id_control_assistencia = ca.id_control_assistencia )
where
n.id_nivell = {0} and
i.dia_impartir >= '{1}' and
i.dia_impartir <= '{2}'
group by
a.id_alumne
having
1.0 * coalesce ( count( f.id_control_assistencia ), 0 ) /
( coalesce ( count( p.id_control_assistencia ), 0 ) + coalesce ( count( f.id_control_assistencia ), 0 ) )
> ( 1.0 * {3} / 100)
order by
1.0 * coalesce ( count( f.id_control_assistencia ), 0 ) /
( coalesce ( count( p.id_control_assistencia ), 0 ) + coalesce ( count( f.id_control_assistencia ), 0 ) )
desc
'''.format( nivell.pk, data_inici, data_fi, tpc )
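As an aside, the three left self-joins can usually be collapsed into conditional aggregation; a sketch over the same schema (untested, and it assumes codi_estat lives in estat_control_assistencia, as the subqueries above suggest):
select
    a.id_alumne,
    count( case when e.codi_estat in ('P', 'R') then 1 end ) as p,
    count( case when e.codi_estat = 'J' then 1 end ) as j,
    count( case when e.codi_estat = 'F' then 1 end ) as f
from
    alumne a
    inner join control_assistencia ca on ( ca.id_alumne = a.id_alumne and ca.id_estat is not null )
    inner join estat_control_assistencia e on ( e.id_estat = ca.id_estat )
group by
    a.id_alumne
-- keep the grup/curs/nivell joins, the date range, and the HAVING/ORDER BY from the original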
If you don't care too much whether it uses the query API or Python after the fact, use itertools.groupby.
from itertools import groupby

# groupby only merges consecutive items, so order the queryset by student first
attendances = (Attendance.objects.select_related()
               .filter(student__level__exact=level)
               .order_by('student'))
students = []
for s, g in groupby(attendances, key=lambda a: a.student):
    g = list(g)  # g is an iterator
    present = len([a for a in g if a.status.name == 'present'])
    absent = len([a for a in g if a.status.name == 'absent'])
    justified = len([a for a in g if a.status.name == 'justified'])
    total = len(g)
    percent = int(100 * absent / total)
    students.append(dict(name=s.name, present=present, absent=absent, percent=percent))
students = [s for s in sorted(students, key=lambda x: x['percent']) if s['percent'] > 25]
You can pass the resulting list of dicts to the view the same way you would any other queryset.