I have a function that takes a few arguments and executes a DELETE query. Right after that, I call another function that updates a table with a timestamp and user id. The code I use looks like this:
remote function deleteComment(required string storeID, required string recID) returnFormat="JSON" {
    local.fnResults = structNew();
    local.runProcess = true;
    try {
        if ( !len(arguments.recID) ) {
            local.runProcess = false;
            local.fnResults = {status: 400, message: "Error! Incomplete or invalid data."};
        }
        if ( runProcess ) {
            local.qryParams = {rec_id: {cfsqltype: "cf_sql_numeric", value: "#arguments.recID#"}};
            local.deleteSQL = "DELETE FROM Articles WHERE rec_id = :rec_id";
            queryExecute(deleteSQL, qryParams, {datasource: "#application.datasource#", result: "deleteResult"});
            local.updateResult = updateProfile(arguments.storeID);
            if ( deleteResult.recordcount && updateResult.recordcount ) {
                local.fnResults = {status: 200, message: "Record successfully removed."};
            } else {
                local.fnResults = {status: 400, message: "Error! Query failed."};
            }
        }
    } catch ( any e ) {
        local.fnResults = {status: 400, message: "Error! Please contact your administrator"};
    }
    return fnResults;
}
Here is an example of the updateProfile function:
public struct function updateProfile(required string storeID) {
    try {
        local.qryParams = {
            storeID: {cfsqltype: "cf_sql_numeric", value: "#arguments.storeID#"},
            user_id: client.userid
        };
        local.Profile_SQL = "
            UPDATE profile
            SET last_update = getDate(),
                user_id = :user_id
            WHERE store_id = :storeID
        ";
        queryExecute(Profile_SQL, qryParams, {datasource: "#application.datasource#", result: "updateResult"});
        return updateResult;
    } catch ( any e ) {
        return {status: 400};
    }
}
As you can see in my example above, the delete query is executed first, and right after that I call the public function updateProfile, which updates the profile table and returns the query result. I used this block of code to check whether both queries executed successfully:
if ( deleteResult.recordcount && updateResult.recordcount ) {
    local.fnResults = {status: 200, message: "Record successfully removed."};
} else {
    local.fnResults = {status: 400, message: "Error! Query failed."};
}
I'm wondering whether that check is necessary, or whether I can let the try/catch block detect any errors in this function. Also, is this a good fit for a stored procedure? I have other functions that will need to call updateProfile to update records. If you have any questions please let me know.
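For reference, here is a minimal sketch of how the delete and the profile update could run atomically inside a cfscript transaction block, assuming both statements use the same datasource (note that updateProfile's own try/catch would have to rethrow for a failed update to trigger the rollback):

transaction {
    try {
        queryExecute(
            "DELETE FROM Articles WHERE rec_id = :rec_id",
            {rec_id: {cfsqltype: "cf_sql_numeric", value: arguments.recID}},
            {datasource: application.datasource, result: "local.deleteResult"}
        );
        local.updateResult = updateProfile(arguments.storeID);
        transactionCommit();
    } catch (any e) {
        // Either statement failing undoes both.
        transactionRollback();
        rethrow;
    }
}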
I am using Facebook login in my ColdFusion application. I went through all the steps to get an access_token. I am making a cfhttp call to the "/me" endpoint with the access_token and fields parameters, and getting an "I/O Exception: peer not authenticated" error in response.errordetail. When I make the same cfhttp call again, I get a successful response with the id and name fields I requested.
I am expecting a successful response on the first call, but I have to call twice: the first call errors out and the second one succeeds.
Any help on how to debug or find the issue with the first call would be appreciated, so that I can make one call and get a successful response with the current user's information.
<cfscript>
if (code EQ "") {
    cfoauth(type = 'facebook', clientid = appId, secretkey = appSecret, result = "res", scope = "public_profile, email", redirecturi = redirectUri);
}
if (code NEQ "") { // code is a parameter I get from the url redirect after the cfoauth response
    try {
        cfhttp(method = "GET", charset = "utf-8", url = "https://graph.facebook.com/v15.0/oauth/access_token", result = "fbcoderesponse") {
            cfhttpparam(name = "code", type = "url", value = code);
            cfhttpparam(name = "client_id", type = "url", value = appId);
            cfhttpparam(name = "client_secret", type = "url", value = appSecret);
            cfhttpparam(name = "redirect_uri", type = "url", value = redirectUri);
        }
        dresponse = deserializeJSON(fbcoderesponse.filecontent);
        accessToken = dresponse.access_token;
        writedump(accessToken);
        accessTokenurl = "https://graph.facebook.com/v15.0/me?fields=id,name&access_token=" & accessToken;
        writedump(accessTokenurl);
    } catch (any e) {
        writeoutput(e);
    }
    if (accessToken NEQ "") {
        try {
            cfhttp(method = "GET", charset = "utf-8", url = accessTokenurl, result = "fbaccesstokenresponse");
            writedump(fbaccesstokenresponse);
            if (fbaccesstokenresponse.errordetail != "") {
                cfhttp(method = "GET", charset = "utf-8", url = accessTokenurl, result = "fbaccesstokenresponse1");
                writeoutput("fbaccesstokenresponse1 response");
                writedump(fbaccesstokenresponse1);
            }
        } catch (any e) {
            writeoutput("exception of fbaccesstokenresponse");
            writedump(e);
        }
    } else {
        writeoutput("accesstoken is empty");
    }
}
</cfscript>
I'm trying to use the Google.Cloud.RecaptchaEnterprise library to validate captcha requests for the new enterprise key my client has obtained.
string _siteKey = ConfigurationManager.AppSettings["GoogleCaptcha.CheckboxCaptcha.SiteKey"];
string _apiKey = ConfigurationManager.AppSettings["GoogleCaptcha.ApiKey"];
string _projectId = ConfigurationManager.AppSettings["GoogleCaptcha.ProjectId"];
string recaptchaAction = "CreateAccountAssessment";
try {
var appPath = System.Web.Hosting.HostingEnvironment.ApplicationPhysicalPath;
string credential_path = appPath + "googlecredentials.json";
System.Environment.SetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS", credential_path);
RecaptchaEnterpriseServiceClient client =
RecaptchaEnterpriseServiceClient.Create();
CreateAssessmentRequest createAssessmentRequest = new CreateAssessmentRequest()
{
Assessment = new Assessment()
{
Event = new Event()
{
SiteKey = _siteKey,
Token = formResponse,
ExpectedAction = "Create_Account"
},
Name = recaptchaAction,
},
Parent = _projectId
};
Assessment response = client.CreateAssessment(createAssessmentRequest);
if (response.TokenProperties.Valid == false)
{
Sitecore.Diagnostics.Log.Error("The CreateAssessment() call failed " +
"because the token was invalid for the following reason: " +
response.TokenProperties.InvalidReason.ToString(), this);
return "Invalid captcha.";
}
else
{
if (response.Event.ExpectedAction == recaptchaAction)
{
Sitecore.Diagnostics.Log.Error("The reCAPTCHA score for this token is: " +
response.RiskAnalysis.Score.ToString(), this);
return "";
}
else
{
Sitecore.Diagnostics.Log.Error("The action attribute in your reCAPTCHA " +
"tag does not match the action you are expecting to score", this);
return "Invalid captcha.";
}
}
}
catch (Exception ex)
{
Sitecore.Diagnostics.Log.Error("Error validating captcha on " + _url + "; " + ex.Message, this);
return "Unable to connect to captcha service.";
}
As far as I can tell all my properties are correct, but the call Assessment response = client.CreateAssessment(createAssessmentRequest); throws an error:
Status(StatusCode="InvalidArgument", Detail="Request contains an invalid argument.", DebugException="Grpc.Core.Internal.CoreErrorDetailException: {"created":"#1621287236.280000000","description":"Error received from peer ipv6:[2607:f8b0:4006:81a::200a]:443","file":"T:\src\github\grpc\workspace_csharp_ext_windows_x64\src\core\lib\surface\call.cc","file_line":1062,"grpc_message":"Request contains an invalid argument.","grpc_status":3}")
I strongly suspect the problem (or at least a problem) is the Parent property of the request.
From the documentation:
The name of the project in which the assessment will be created, in the format "projects/{project}".
... whereas I suspect your project ID is just the ID, rather than the resource name starting with "projects/".
I would recommend using the generated resource name classes as far as possible, with the corresponding properties. So in this case, you'd have:
CreateAssessmentRequest createAssessmentRequest = new CreateAssessmentRequest
{
Assessment = new Assessment
{
Event = new Event
{
SiteKey = _siteKey,
Token = formResponse,
ExpectedAction = "Create_Account"
},
Name = recaptchaAction,
},
ParentAsProjectName = new ProjectName(_projectId)
};
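As a quick illustration (with a hypothetical project ID), ProjectName simply builds the resource name string the API expects:

// "my-project" is a placeholder; use your actual project ID.
ProjectName parent = new ProjectName("my-project");
Console.WriteLine(parent); // projects/my-project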
I've read a lot of similar questions about adding newline characters to Firehose records, but they all involve adding the newline character at the source. The problem is that I don't have access to the source: a third party pipes data to our Kinesis instance, and I cannot add the \n at the source.
I've tried doing a Firehose data transformation using the following code:
'use strict';
console.log('Loading function');
exports.handler = (event, context, callback) => {
/* Process the list of records and transform them */
const output = [];
event.records.forEach((record) => {
const results = {
/* This transformation is the "identity" transformation, the data is left intact */
recordId: record.recordId,
result: record.data.event_type === 'alert' ? 'Dropped' : 'Ok',
data: record.data + '\n'
};
output.push(results);
});
console.log(`Processing completed. Successful records ${output.length}.`);
callback(null, { records: output });
};
but the newline is still lost. I've also tried JSON.stringify(record.data) + '\n' but then I get an Invalid output structure error.
Try decoding record.data, add a newline, then encode it again as base64. This is Python, but the idea is the same:
output = []
for record in event['records']:
    payload = base64.b64decode(record['data']).decode('utf-8')
    # Do custom processing on the payload here
    payload = payload + '\n'
    output_record = {
        'recordId': record['recordId'],
        'result': 'Ok',
        'data': base64.b64encode(payload.encode('utf-8')).decode('utf-8')
    }
    output.append(output_record)
return {'records': output}
From the comment of #Matt Westlake:
For those looking for the Node.js answer, it's
const data = JSON.parse(Buffer.from(record.data, 'base64').toString('utf8'));
and
Buffer.from(JSON.stringify(data) + '\n').toString('base64')
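Putting those two lines together, a minimal sketch of a complete transformation handler might look like this (assuming each record's decoded payload is a single JSON object):

'use strict';

exports.handler = (event, context, callback) => {
    const output = event.records.map((record) => {
        // Decode the base64 payload, parse it, append a newline, re-encode.
        const data = JSON.parse(Buffer.from(record.data, 'base64').toString('utf8'));
        return {
            recordId: record.recordId,
            result: 'Ok',
            data: Buffer.from(JSON.stringify(data) + '\n').toString('base64'),
        };
    });
    callback(null, { records: output });
};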
The kinesis-firehose-cloudwatch-logs-processor blueprint lambda does this (with some additional handling for cloudwatch logs).
Here's the lambda code from the blueprint as of today:
/*
For processing data sent to Firehose by Cloudwatch Logs subscription filters.
Cloudwatch Logs sends to Firehose records that look like this:
{
"messageType": "DATA_MESSAGE",
"owner": "123456789012",
"logGroup": "log_group_name",
"logStream": "log_stream_name",
"subscriptionFilters": [
"subscription_filter_name"
],
"logEvents": [
{
"id": "01234567890123456789012345678901234567890123456789012345",
"timestamp": 1510109208016,
"message": "log message 1"
},
{
"id": "01234567890123456789012345678901234567890123456789012345",
"timestamp": 1510109208017,
"message": "log message 2"
}
...
]
}
The data is additionally compressed with GZIP.
The code below will:
1) Gunzip the data
2) Parse the json
3) Set the result to ProcessingFailed for any record whose messageType is not DATA_MESSAGE, thus redirecting them to the
processing error output. Such records do not contain any log events. You can modify the code to set the result to
Dropped instead to get rid of these records completely.
4) For records whose messageType is DATA_MESSAGE, extract the individual log events from the logEvents field, and pass
each one to the transformLogEvent method. You can modify the transformLogEvent method to perform custom
transformations on the log events.
5) Concatenate the result from (4) together and set the result as the data of the record returned to Firehose. Note that
this step will not add any delimiters. Delimiters should be appended by the logic within the transformLogEvent
method.
6) Any additional records which exceed 6MB will be re-ingested back into Firehose.
*/
const zlib = require('zlib');
const AWS = require('aws-sdk');
/**
* logEvent has this format:
*
* {
* "id": "01234567890123456789012345678901234567890123456789012345",
* "timestamp": 1510109208016,
* "message": "log message 1"
* }
*
* The default implementation below just extracts the message and appends a newline to it.
*
* The result must be returned in a Promise.
*/
function transformLogEvent(logEvent) {
return Promise.resolve(`${logEvent.message}\n`);
}
function putRecordsToFirehoseStream(streamName, records, client, resolve, reject, attemptsMade, maxAttempts) {
client.putRecordBatch({
DeliveryStreamName: streamName,
Records: records,
}, (err, data) => {
const codes = [];
let failed = [];
let errMsg = err;
if (err) {
failed = records;
} else {
for (let i = 0; i < data.RequestResponses.length; i++) {
const code = data.RequestResponses[i].ErrorCode;
if (code) {
codes.push(code);
failed.push(records[i]);
}
}
errMsg = `Individual error codes: ${codes}`;
}
if (failed.length > 0) {
if (attemptsMade + 1 < maxAttempts) {
console.log('Some records failed while calling PutRecordBatch, retrying. %s', errMsg);
putRecordsToFirehoseStream(streamName, failed, client, resolve, reject, attemptsMade + 1, maxAttempts);
} else {
reject(`Could not put records after ${maxAttempts} attempts. ${errMsg}`);
}
} else {
resolve('');
}
});
}
function putRecordsToKinesisStream(streamName, records, client, resolve, reject, attemptsMade, maxAttempts) {
client.putRecords({
StreamName: streamName,
Records: records,
}, (err, data) => {
const codes = [];
let failed = [];
let errMsg = err;
if (err) {
failed = records;
} else {
for (let i = 0; i < data.Records.length; i++) {
const code = data.Records[i].ErrorCode;
if (code) {
codes.push(code);
failed.push(records[i]);
}
}
errMsg = `Individual error codes: ${codes}`;
}
if (failed.length > 0) {
if (attemptsMade + 1 < maxAttempts) {
console.log('Some records failed while calling PutRecords, retrying. %s', errMsg);
putRecordsToKinesisStream(streamName, failed, client, resolve, reject, attemptsMade + 1, maxAttempts);
} else {
reject(`Could not put records after ${maxAttempts} attempts. ${errMsg}`);
}
} else {
resolve('');
}
});
}
function createReingestionRecord(isSas, originalRecord) {
if (isSas) {
return {
Data: new Buffer(originalRecord.data, 'base64'),
PartitionKey: originalRecord.kinesisRecordMetadata.partitionKey,
};
} else {
return {
Data: new Buffer(originalRecord.data, 'base64'),
};
}
}
function getReingestionRecord(isSas, reIngestionRecord) {
if (isSas) {
return {
Data: reIngestionRecord.Data,
PartitionKey: reIngestionRecord.PartitionKey,
};
} else {
return {
Data: reIngestionRecord.Data,
};
}
}
exports.handler = (event, context, callback) => {
Promise.all(event.records.map(r => {
const buffer = new Buffer(r.data, 'base64');
const decompressed = zlib.gunzipSync(buffer);
const data = JSON.parse(decompressed);
// CONTROL_MESSAGE are sent by CWL to check if the subscription is reachable.
// They do not contain actual data.
if (data.messageType === 'CONTROL_MESSAGE') {
return Promise.resolve({
recordId: r.recordId,
result: 'Dropped',
});
} else if (data.messageType === 'DATA_MESSAGE') {
const promises = data.logEvents.map(transformLogEvent);
return Promise.all(promises)
.then(transformed => {
const payload = transformed.reduce((a, v) => a + v, '');
const encoded = new Buffer(payload).toString('base64');
return {
recordId: r.recordId,
result: 'Ok',
data: encoded,
};
});
} else {
return Promise.resolve({
recordId: r.recordId,
result: 'ProcessingFailed',
});
}
})).then(recs => {
const isSas = Object.prototype.hasOwnProperty.call(event, 'sourceKinesisStreamArn');
const streamARN = isSas ? event.sourceKinesisStreamArn : event.deliveryStreamArn;
const region = streamARN.split(':')[3];
const streamName = streamARN.split('/')[1];
const result = { records: recs };
let recordsToReingest = [];
const putRecordBatches = [];
let totalRecordsToBeReingested = 0;
const inputDataByRecId = {};
event.records.forEach(r => inputDataByRecId[r.recordId] = createReingestionRecord(isSas, r));
let projectedSize = recs.filter(rec => rec.result === 'Ok')
.map(r => r.recordId.length + r.data.length)
.reduce((a, b) => a + b);
// 6000000 instead of 6291456 to leave ample headroom for the stuff we didn't account for
for (let idx = 0; idx < event.records.length && projectedSize > 6000000; idx++) {
const rec = result.records[idx];
if (rec.result === 'Ok') {
totalRecordsToBeReingested++;
recordsToReingest.push(getReingestionRecord(isSas, inputDataByRecId[rec.recordId]));
projectedSize -= rec.data.length;
delete rec.data;
result.records[idx].result = 'Dropped';
// split out the record batches into multiple groups, 500 records at max per group
if (recordsToReingest.length === 500) {
putRecordBatches.push(recordsToReingest);
recordsToReingest = [];
}
}
}
if (recordsToReingest.length > 0) {
// add the last batch
putRecordBatches.push(recordsToReingest);
}
if (putRecordBatches.length > 0) {
new Promise((resolve, reject) => {
let recordsReingestedSoFar = 0;
for (let idx = 0; idx < putRecordBatches.length; idx++) {
const recordBatch = putRecordBatches[idx];
if (isSas) {
const client = new AWS.Kinesis({ region: region });
putRecordsToKinesisStream(streamName, recordBatch, client, resolve, reject, 0, 20);
} else {
const client = new AWS.Firehose({ region: region });
putRecordsToFirehoseStream(streamName, recordBatch, client, resolve, reject, 0, 20);
}
recordsReingestedSoFar += recordBatch.length;
console.log('Reingested %s/%s records out of %s in to %s stream', recordsReingestedSoFar, totalRecordsToBeReingested, event.records.length, streamName);
}
}).then(
() => {
console.log('Reingested all %s records out of %s in to %s stream', totalRecordsToBeReingested, event.records.length, streamName);
callback(null, result);
},
failed => {
console.log('Failed to reingest records. %s', failed);
callback(failed, null);
});
} else {
console.log('No records needed to be reingested.');
callback(null, result);
}
}).catch(ex => {
console.log('Error: ', ex);
callback(ex, null);
});
};
Here is code that will solve the problem:
__author__ = "Soumil Nitin Shah"

import json
import base64


class MyHasher(object):
    def __init__(self, key):
        self.key = key

    def get(self):
        keys = str(self.key).encode("UTF-8")
        keys = base64.b64encode(keys)
        keys = keys.decode("UTF-8")
        return keys


def lambda_handler(event, context):
    output = []
    for record in event['records']:
        payload = base64.b64decode(record['data'])
        # Parse the payload, then append a newline
        serialize_payload = str(json.loads(payload)) + "\n"
        hasherHelper = MyHasher(key=serialize_payload)
        hash = hasherHelper.get()
        output_record = {
            'recordId': record['recordId'],
            'result': 'Ok',
            'data': hash
        }
        print("output_record", output_record)
        output.append(output_record)
    return {'records': output}
In Vtiger 6.5.0 (open source), I want to create an alert function to warn users that a contact's mobile number already exists. Could you please help me? I'm a beginner.
Thanks,
Loi
You can refer to the function that already exists in the Accounts module for checking for a duplicate account name.
Please follow these files and you will get an idea.
This is the code flow for how it's done in the Accounts module.
Registering the pre-save event:
http://code.vtiger.com/vtiger/vtigercrm/blob/master/layouts/vlayout/modules/Accounts/resources/Edit.js#L250
This is the function that checks for a duplicate in the cache and, if not found, calls the helper function:
http://code.vtiger.com/vtiger/vtigercrm/blob/master/layouts/vlayout/modules/Accounts/resources/Edit.js#L83
This is the helper function which makes the call to the server:
http://code.vtiger.com/vtiger/vtigercrm/blob/master/resources/helper.js#L166
This is the action function which is responsible for serving the request that came from the helper function:
http://code.vtiger.com/vtiger/vtigercrm/blob/master/modules/Accounts/actions/CheckDuplicate.php#L30
And this is the function which checks for the duplicate:
http://code.vtiger.com/vtiger/vtigercrm/blob/master/modules/Accounts/models/Record.php#L57
Hope this helps.
Hi Victor, please follow these steps:
modules\Leads\actions\Checkprimaryemail.php
<?php
class Leads_Checkprimaryemail_Action extends Vtiger_BasicAjax_Action {
public function checkPermission(Vtiger_Request $request) {
return;
}
public function process(Vtiger_Request $request) {
global $adb;
$moduleName = $request->get('module');
$recordId = $request->get('recordId');
$primary_email = $request->get('primary_email');
/*Lead Details*/
$lead_query = "select * from vtiger_leaddetails
inner join vtiger_crmentity on vtiger_crmentity.crmid=vtiger_leaddetails.leadid
where vtiger_crmentity.deleted = 0 and vtiger_leaddetails.email='".$primary_email."'";
$lead_result = $adb->query($lead_query);
$lead_email = $adb->query_result($lead_result,0,'email');
$lead_numrows = $adb->num_rows($lead_result);
/*Contact Details*/
$cont_query = "select * from vtiger_contactdetails
inner join vtiger_crmentity on vtiger_crmentity.crmid=vtiger_contactdetails.contactid
where vtiger_crmentity.deleted = 0 and vtiger_contactdetails.email='".$primary_email."'";
$cont_result = $adb->query($cont_query);
$cont_email = $adb->query_result($cont_result,0,'email');
$cont_numrows = $adb->num_rows($cont_result);
if($recordId != '' ){
if($primary_email == $lead_email && $lead_numrows == 1 ){
$emailtrue = 0;
} elseif($primary_email == $cont_email && $cont_numrows >= 1 ) {
$emailtrue = 1;
}
} else {
if(($lead_numrows >=1 || $cont_numrows >=1 ) || ($lead_numrows >=1 && $cont_numrows >= 1) ){
$emailtrue = 1;
} else {
$emailtrue = 0;
}
}
$emailData = array($emailtrue);
$response = new Vtiger_Response();
$response->setResult($emailData);
$response->emit();
}
}
?>
After that, create one other file:
layouts\vlayout\modules\Leads\resources\Edit.js
Vtiger_Edit_Js("Leads_Edit_Js", {
}, {
changeEvent: function (container) {
jQuery('input[name="email"]').on('focusout', function (e) {
var email = jQuery('input[name="email"]').val();
var recordId = jQuery('input[name="record"]').val();
var email_length = email.length;
if (email != '') {
if (email_length > 100) {
var errorMessage = app.vtranslate('JS_EMAIL_LENGTH_VALIDATION');
params = {
text: errorMessage,
'type': 'error',
};
Vtiger_Helper_Js.showMessage(params);
}
var progressIndicatorElement = jQuery.progressIndicator({
'position': 'html',
'blockInfo': {
'enabled': true
}
});
var postData = {
"module": 'Leads',
"action": "Checkprimaryemail",
"primary_email": email,
"recordId": recordId
}
AppConnector.request(postData).then(
function (data) {
progressIndicatorElement.progressIndicator({'mode': 'hide'});
if (data['result'] == 1) {
jQuery('#emailalready_exists').val(1);
var errorMessage = app.vtranslate('JS_EMAIL_EXIST');
params = {
text: errorMessage,
'type': 'error',
};
Vtiger_Helper_Js.showMessage(params);
} else {
jQuery('#emailalready_exists').val(0);
}
},
function (error, err) {}
);
e.preventDefault();
}
});
},
registerBasicEvents: function (container) {
this._super(container);
this.changeEvent(container);
}
});
To check for duplicate records in vTiger, follow the steps below.
Register the checkDuplicate function in registerBasicEvents:
1: \layouts\vlayout\modules\Contacts\resources\Edit.js
getmobile : function(container){
return jQuery('input[name="mobile"]',container).val();
},
getRecordId : function(container){
return jQuery('input[name="record"]',container).val();
},
DuplicateCheck : function(form) {
var thisInstance = this;
if(typeof form == 'undefined') {
form = this.getForm();
}
jQuery( "#mobileFieldId" ).change(function() {
var mobile = thisInstance.getmobile(form);
var recordId = thisInstance.getRecordId(form);
var params = {
'module' : "Contacts",
'action' : "CheckDuplicate",
'mobile' : mobile,
'record' : recordId
}
AppConnector.request(params).then(
function(data) {
var response = data['result'];
var result = response['success'];
if(result == true) {
var message_params = {
title : app.vtranslate('JS_MESSAGE'),
text: response['message'],
animation: 'show',
type: 'error'
};
Vtiger_Helper_Js.showPnotify(message_params);
jQuery(".btn-success").attr('disabled',true);
return false;
} else {
jQuery(".btn-success").attr('disabled',false);
}
}
);
});
},
2: Create a new file in \modules\Contacts\actions\CheckDuplicate.php
Follow the same process/code as given in \modules\Accounts\actions\CheckDuplicate.php.
3: Add a new function checkDuplicate() in \modules\Contacts\models\Record.php
and follow the same process as given in \modules\Accounts\models\Record.php, which has the function checkDuplicate().
Note: Don't forget to change the db table name and class name for each module.
Hope this will help you. Thank you.
I'm in the process of converting an ASP repeater into an ExtJS grid. Above the repeater are a dropdown and a radio button list. The dropdown selects which client's data the repeater shows, and the radio button list selects the query type (default, resource, or role). Currently, when the dropdown or radio button is changed, the page posts back with the new data.
I'm not sure how to pass the values of these two controls into my static web service on the back end via the ExtJS store API GET call.
The ExtJS store code...
store: Ext.create('Ext.data.Store', {
autoLoad: true,
autoSync: false,
model: 'Assembly',
proxy: {
type: 'ajax',
headers: { "Content-Type": 'application/json' },
api: {
read: '/Admin/BillRateData.aspx/Get'
},
reader: {
type: 'json',
root: function (o) {
if (o.d) {
return o.d;
} else {
return o.children;
}
}
},
writer: {
type: 'json',
root: 'jsonData',
encode: false,
allowSingle: false
},
listeners: {
exception: function (proxy, response, operation) {
Ext.MessageBox.show({
title: "Workflow Groups Error",
msg: operation.action + ' Operation Failed: ' + operation.getError().statusText,
icon: Ext.MessageBox.ERROR,
buttons: Ext.Msg.OK
});
}
}
    }
})
And the web service... (with some pseudocode)
[WebMethod]
[ScriptMethod(ResponseFormat = ResponseFormat.Json, UseHttpGet = true)]
public static List<BillRate> Get()
{
using (TimEntities db = new TimEntities())
{
int tableId = Int32.Parse(ddlTable.SelectedValue);
var defaultQry = from t1 in db.BillCostTableDatas
where t1.TableId == tableId
&& t1.ResourceId == 0 && t1.RoleId == 0
orderby t1.Rate
select new
{
id = t1.Id,
resource = "",
role = "",
rate = t1.Rate,
TierName = ""
};
var resourceQry = from t1 in db.BillCostTableDatas
join t2 in db.Machines on t1.ResourceId equals t2.Machine_ID
join t3 in db.TOMIS_USER on t2.Machine_User_ID equals t3.User_ID
join t4 in db.PricingTierNames on t1.PricingTierID equals t4.TierID
where t1.TableId == tableId
&& t1.ResourceId != 0
&& t1.RoleId == 0
orderby t3.LName, t3.FName, t1.Rate, t4.TierName
select new
{
id = t1.Id,
resource = t3.LName + ", " + t3.FName,
role = "",
rate = t1.Rate,
TierName = t4.TierName
};
var roleQry = from t1 in db.BillCostTableDatas
join t2 in db.TaskRoles on t1.RoleId equals t2.Id
where t1.TableId == tableId
&& t1.ResourceId == 2 && t1.RoleId != 0
orderby t2.Name, t1.Rate
select new
{
id = t1.Id,
resource = "",
role = t2.Name,
rate = t1.Rate,
TierName = ""
};
if (this.rblOptions.SelectedValue == "resource")
{
var results = from Res in resourceQry.ToList()
select new BillRate
{
};
return results.ToList();
}
else if (this.rblOptions.SelectedValue == "role")
{
var results = from Res in roleQry.ToList()
select new BillRate
{
};
return results.ToList();
}
else
{
var results = from Res in defaultQry.ToList()
select new BillRate
{
};
return results.ToList();
}
return null;
}
}
If you trigger your store loading manually, you can pass a params option to the load method.
Example:
var store = Ext.create('Ext.data.Store', {
// prevent the store from loading before we told it to do so
autoLoad: false
...
});
store.load({
params: {clientId: 123, queryType: 'default'}
...
});
If you want the params to be sent for multiple subsequent queries, you can write them in the extraParams property of the proxy.
Example:
var store = Ext.create('Ext.data.Store', { ... });
Ext.apply(store.getProxy().extraParams, {
clientId: 321
,queryType: 'role'
});
// the store will still need a refresh
store.reload();
The way these params are passed to the server depends on the type of request: for GET requests they are appended as query string parameters, while for POST requests they are embedded in the request body.
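On the server side, the page method can then declare matching parameters. Here is a minimal sketch (hypothetical signature, using the clientId and queryType names from the examples above in place of the dropdown and radio button reads, which are not available in a static method anyway):

[WebMethod]
[ScriptMethod(ResponseFormat = ResponseFormat.Json, UseHttpGet = true)]
public static List<BillRate> Get(int clientId, string queryType)
{
    using (TimEntities db = new TimEntities())
    {
        // clientId replaces Int32.Parse(ddlTable.SelectedValue);
        // queryType replaces rblOptions.SelectedValue.
        if (queryType == "resource")
        {
            // build and return the resource query results here
        }
        else if (queryType == "role")
        {
            // build and return the role query results here
        }
        // default query
        return new List<BillRate>();
    }
}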