Flutter/Dart/Firestore - Group documents by day in an AnimatedList

How can I group messages between users by day? So far the code just adds each message to the list and sorts by date, but I can't seem to add a gap between messages that were sent on previous days. The list is displayed in an AnimatedList().
Code:
List<Message> _messages = [];
...
_initMessages() { // Called in initState
  APIs().chats.messages(
      chatID: widget.chat.chatID,
      onEmpty: () {
        if (mounted)
          setState(() {
            this._isLoading = false;
          });
      },
      onAdded: (m) {
        if (mounted)
          setState(() {
            if (m.type == 'Image') {
              m.message = NetworkImage(m.message);
            }
            this._messages.add(m);
            this._messages.sort((a, b) => a.createdAt.compareTo(b.createdAt));
            if (mounted) this._listKey.currentState.insertItem(this._messages.length, duration: Duration(milliseconds: 500));
            this._isLoading = false;
          });
      },
      onModified: (m) {
        int i = this._messages.indexWhere((message) => message.messageID == m.messageID);
        if (mounted)
          setState(() {
            this._messages[i] = m;
            this._messages.sort((a, b) => a.createdAt.compareTo(b.createdAt));
            this._isLoading = false;
          });
      },
      onRemoved: (m) {
        int i = this._messages.indexWhere((message) => message.messageID == m.messageID);
        if (mounted)
          setState(() {
            this._messages.removeAt(i);
            this._messages.sort((a, b) => a.createdAt.compareTo(b.createdAt));
            this._isLoading = false;
          });
      },
      onFailure: (e) {
        print(e);
      });
}
It shows the messages in order, but when I changed the widget to an AnimatedList it gives me this error:
[VERBOSE-2:ui_dart_state.cc(148)] Unhandled Exception: 'package:flutter/src/widgets/animated_list.dart': Failed assertion: line 279 pos 12: 'itemIndex >= 0 && itemIndex <= _itemsCount': is not true.
It's kind of a two-part question: how to group the documents by day, and how to get that working in an AnimatedList. The date is of the Firestore Timestamp type.
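A likely cause of the assertion: after add(), _messages.length is one past the last valid index, and after sort() the new message may not even be at the end, so insertItem should be given the index the message actually landed at. For the day gap, one common approach is to compare each message's calendar day with the previous message's and render a date header whenever it changes. A minimal sketch, assuming Message.createdAt is a Firestore Timestamp (so toDate() yields a DateTime) and MessageBubble is a hypothetical message widget:

// Insert at the index the message ended up at after sorting,
// not at _messages.length (which fails the AnimatedList assertion).
this._messages.add(m);
this._messages.sort((a, b) => a.createdAt.compareTo(b.createdAt));
final index = this._messages.indexOf(m);
this._listKey.currentState.insertItem(index, duration: Duration(milliseconds: 500));

// Strip the time component so messages can be compared by calendar day.
DateTime _dayOf(Message m) {
  final d = m.createdAt.toDate(); // Firestore Timestamp -> DateTime
  return DateTime(d.year, d.month, d.day);
}

// AnimatedList itemBuilder: show a date header whenever the day changes
// relative to the previous message in the sorted list.
Widget _buildItem(BuildContext context, int index, Animation<double> animation) {
  final message = _messages[index];
  final showHeader =
      index == 0 || _dayOf(_messages[index - 1]) != _dayOf(message);
  return SizeTransition(
    sizeFactor: animation,
    child: Column(
      crossAxisAlignment: CrossAxisAlignment.stretch,
      children: <Widget>[
        if (showHeader) Text(_dayOf(message).toString()), // style as a day divider
        MessageBubble(message), // hypothetical bubble widget
      ],
    ),
  );
}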

Related

I am mocking two functions exactly the same way. In one case the mock value is returned and in another case the real function is called. Why?

I have a file that exports some functions:
function getNow() {
console.log('real now');
return dayjs();
}
function groupProducts(productInfos, now) {
console.log('real group');
return productInfos.reduce((groups, productInfo) => {
const groupKey = dayjs(productInfo.saleStartDate) > now ? dayjs(productInfo.saleStartDate).format('YYYY-MM-DD') : dayjs(now).format('YYYY-MM-DD');
let group = groups[groupKey];
if (!group) {
group = [];
// eslint-disable-next-line no-param-reassign
groups[groupKey] = group;
}
group.push(productInfo.itemId);
return groups;
}, {});
}
async function fetchProducts(client, productInfos, now) {
const products = [];
const groups = groupProducts(productInfos, now);
for (const [date, ids] of Object.entries(groups)) {
// eslint-disable-next-line no-await-in-loop
const productBatch = await fetchResources(
client.queryProducts,
{
articleIds: ids,
timestamp: date,
},
);
products.push(...productBatch);
}
return products;
}
module.exports = {
test: {
getNow,
groupProducts,
fetchProducts,
},
};
I run my tests with:
package.json script
"testw": "npx ../node_modules/.bin/jest --watch",
CLI command:
npm run testw -- filename
In this test I exercise groupProducts and mock getNow. The real getNow is never called and the test passes.
describe('groupProducts', () => {
it('groups productInfo ids into today or future date groups', () => {
// Arrange
const nowSpy = jest.spyOn(test, 'getNow').mockReturnValue(dayjs('2001-02-03T04:05:06.007Z'));
const expectedMap = {
'2001-02-03': ['Art-Past', 'Art-Today'],
'2002-12-31': ['Art-Future-1', 'Art-Future-2'],
'2003-12-31': ['Art-Other-Future'],
};
const productInfos = [{
itemId: 'Art-Past',
saleStartDate: '1999-01-01',
}, {
itemId: 'Art-Today',
saleStartDate: '2001-02-03',
}, {
itemId: 'Art-Future-1',
saleStartDate: '2002-12-31',
}, {
itemId: 'Art-Future-2',
saleStartDate: '2002-12-31',
}, {
itemId: 'Art-Other-Future',
saleStartDate: '2003-12-31',
}];
// Act
const dateToIds = test.groupProducts(productInfos, test.getNow());
// Expect
expect(dateToIds).toEqual(expectedMap);
// Restore
nowSpy.mockRestore();
});
});
In this test I exercise fetchProducts and mock groupProducts. The real groupProducts is called, and that causes the test to fail.
describe('fetchProducts', () => {
it.only('calls fetchResources with the timestamp and date for every product query group', async () => {
// Arrange
const productQueryGroups = {
[test.PRICE_GROUPS.CURRENT]: ['Art-Past', 'Art-Today'],
[test.PRICE_GROUPS.FUTURE]: ['Art-Future-1', 'Art-Future-2', 'Art-Other-Future'],
};
const groupProductsSpy = jest.spyOn(test, 'groupProducts').mockReturnValue(productQueryGroups);
const fetchResourcesSpy = jest.spyOn(test, 'fetchResources').mockResolvedValue([]);
// Act
await test.fetchProducts();
// Expect
expect(test.fetchResources).toHaveBeenCalledWith(expect.anything(), expect.objectContaining({ articleIds: [productQueryGroups[test.PRICE_GROUPS.CURRENT]], timestamp: test.PRICE_GROUPS.CURRENT }));
// Restore
groupProductsSpy.mockRestore();
fetchResourcesSpy.mockRestore();
});
});
Error message
98 | function groupProducts(productInfos, now) {
> 99 | return productInfos.reduce((groups, productInfo) => {
| ^
100 | const groupKey = dayjs(productInfo.saleStartDate) > now ? dayjs(productInfo.saleStartDate).format('YYYY-MM-DD') : dayjs(now).format('YYYY-MM-DD');
101 |
102 | let group = groups[groupKey];
Why is the real groupProducts called? To me it looks completely analogous to the previous example.
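The short explanation (an assumption based on how jest.spyOn works, since the imports aren't shown): the spy replaces the groupProducts property on the exported test object, but inside the module fetchProducts calls groupProducts through its local binding, which the spy never touches. The first test passes for the opposite reason: the test itself invokes test.getNow() (the spied property) and passes the result in as now, so the real getNow is never reached by groupProducts at all. One common workaround is to route the internal call through the exports object so the spy can intercept it; a minimal sketch:

// Inside the module under test
async function fetchProducts(client, productInfos, now) {
  // Calling through module.exports means a jest.spyOn(test, 'groupProducts')
  // replacement is the function that actually gets invoked here.
  const groups = module.exports.test.groupProducts(productInfos, now);
  const products = [];
  for (const [date, ids] of Object.entries(groups)) {
    // eslint-disable-next-line no-await-in-loop
    const productBatch = await fetchResources(client.queryProducts, {
      articleIds: ids,
      timestamp: date,
    });
    products.push(...productBatch);
  }
  return products;
}

Alternatives are passing groupProducts in as a dependency, or mocking the whole module with jest.mock.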

Compare node_modules for each release

Looking for a way to compare package changes between releases.
I need a script that, when run, will show the difference (which packages are new, deleted, or updated) between master tagged releases, by comparing the node_modules trees: a list of all changed or new packages/modules within the node_modules tree.
I just came back from holiday, so this is how I attempted a solution:
const checker = require('license-checker')
const compareVersions = require('compare-versions')
const rimraf = require('rimraf')
const { WebClient } = require('@slack/web-api')
const { exec } = require('child_process')
// An access token (from your Slack app or custom integration - xoxp, xoxb)
const token =
'xoxp-3712510934-8544rv58640-699363584817-a66630cfebf2f81e59478c3f8u0e178b'
const channel = 'where_to_post_report'
let prevReleasePackets
let currReleasePackets
function comparerVersion(otherArray) {
return current =>
otherArray.filter(other => other.name === current.name).length === 0
}
function comparerVersionNo(otherArray) {
return current =>
otherArray.filter(
other =>
other.name === current.name &&
compareVersions(other.version, current.version) === -1
).length === 0
}
function mapToData(libs) {
return Object.keys(libs)
.filter(key => key.indexOf('sm-web') === -1 && key.indexOf('debug') === -1)
.map(key => {
const lib = libs[key]
const name = lib.name
.replace(/@/g, '_')
.replace(/\./g, '_')
.replace(/\//g, '_')
const version = lib.version.replace(/@/g, '_').replace(/\//g, '_')
return {
name,
version,
}
})
}
function groupBy(objectArray, property) {
return objectArray.reduce((acc, obj) => {
const key = obj[property]
if (!acc[key]) {
acc[key] = []
}
acc[key].push(obj.version)
return acc
}, {})
}
function getPackets(path) {
return new Promise((resolve, reject) => {
checker.init(
{
start: path,
production: true,
customFormat: {
name: '',
version: '',
},
},
(err, packages) => {
if (err) {
// Handle error
console.log(err)
reject(err)
} else {
// The sorted package data
const packagesReduced = groupBy(mapToData(packages), 'name')
const higherVersionList = []
Object.keys(packagesReduced).forEach(key => {
const versions = packagesReduced[key]
const descVersions = versions.sort(compareVersions).reverse()
higherVersionList.push({ name: key, version: descVersions[0] })
})
resolve(higherVersionList)
}
}
)
})
}
function clonePrevious(tag) {
return new Promise((resolve, reject) => {
exec(
`git clone https://bitbucket.path_to_repo.git prevVersion && cd prevVersion && git checkout tags/${tag} && yarn && cd ..`,
async (error, stdout, stderr) => {
if (error) {
console.warn(error)
reject(error)
}
if (stdout) {
const prevPacks = await getPackets('./prevVersion')
resolve(prevPacks)
} else {
resolve(stderr)
}
}
)
})
}
async function sendReportToSlack(report) {
const web = new WebClient(token)
const res = await web.chat.postMessage({
channel,
text: report,
})
// `res` contains information about the posted message
console.log('Report sent: ', res.ts)
}
const readline = require('readline').createInterface({
input: process.stdin,
output: process.stdout,
})
console.log('Deleting prevVersion folder')
rimraf('prevVersion', async () => {
console.log('Done deleting prevVersion folder')
readline.question(
`What's the version tag for previous release?`,
async tag => {
console.log(`Start cloning release ${tag}!`)
readline.close()
prevReleasePackets = await clonePrevious(tag)
currReleasePackets = await getPackets('./')
const update = currReleasePackets.filter(
comparerVersionNo(prevReleasePackets)
)
const deleted = prevReleasePackets.filter(
comparerVersion(currReleasePackets)
)
const newPacks = currReleasePackets.filter(
comparerVersion(prevReleasePackets)
)
const slackMessage =
`*Packages changes from ${tag}*:\n\n` +
`*--Updated--*: ${JSON.stringify(
update
)},\n\n *--Deleted--*: ${JSON.stringify(
deleted
)} \n\n *--New--*: ${JSON.stringify(newPacks)}`
await sendReportToSlack(slackMessage)
}
)
})

AWS Firehose newline character

I've read a lot of similar questions around adding newline characters to firehose, but they're all around adding the newline character to the source. The problem is that I don't have access to the source, and a third party is piping data to our Kinesis instance and I cannot add the \n to the source.
I've tried doing a Firehose data transformation using the following code:
'use strict';
console.log('Loading function');
exports.handler = (event, context, callback) => {
/* Process the list of records and transform them */
const output = [];
event.records.forEach((record) => {
const results = {
/* This transformation is the "identity" transformation, the data is left intact */
recordId: record.recordId,
result: record.data.event_type === 'alert' ? 'Dropped' : 'Ok',
data: record.data + '\n'
};
output.push(results);
});
console.log(`Processing completed. Successful records ${output.length}.`);
callback(null, { records: output });
};
but the newline is still lost. I've also tried JSON.stringify(record.data) + '\n' but then I get an Invalid output structure error.
Try decoding the record.data, add a newline, then encode it again as base64. This is Python, but the idea is the same:
output = []
for record in event['records']:
    # record['data'] is base64-encoded; decode it to text first
    payload = base64.b64decode(record['data']).decode('utf-8')
    # Do custom processing on the payload here
    payload = payload + '\n'
    output_record = {
        'recordId': record['recordId'],
        'result': 'Ok',
        # re-encode as base64 (b64encode needs bytes; Firehose wants a string back)
        'data': base64.b64encode(payload.encode('utf-8')).decode('utf-8')
    }
    output.append(output_record)
return {'records': output}
From the comment of @Matt Westlake:
For those looking for the Node.js answer, it's
const data = JSON.parse(Buffer.from(record.data, 'base64').toString('utf8'));
and
Buffer.from(JSON.stringify(data) + '\n').toString('base64')
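Put together, a complete transformation handler along those lines might look like this (a sketch assembled from that comment, not verbatim from any AWS sample):

'use strict';

exports.handler = (event, context, callback) => {
  const output = event.records.map((record) => {
    // Decode the base64 payload, append the newline, then re-encode.
    const data = JSON.parse(Buffer.from(record.data, 'base64').toString('utf8'));
    return {
      recordId: record.recordId,
      result: 'Ok',
      data: Buffer.from(JSON.stringify(data) + '\n').toString('base64'),
    };
  });
  callback(null, { records: output });
};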
The kinesis-firehose-cloudwatch-logs-processor blueprint lambda does this (with some additional handling for cloudwatch logs).
Here's the lambda code from the blueprint as of today:
/*
For processing data sent to Firehose by Cloudwatch Logs subscription filters.
Cloudwatch Logs sends to Firehose records that look like this:
{
"messageType": "DATA_MESSAGE",
"owner": "123456789012",
"logGroup": "log_group_name",
"logStream": "log_stream_name",
"subscriptionFilters": [
"subscription_filter_name"
],
"logEvents": [
{
"id": "01234567890123456789012345678901234567890123456789012345",
"timestamp": 1510109208016,
"message": "log message 1"
},
{
"id": "01234567890123456789012345678901234567890123456789012345",
"timestamp": 1510109208017,
"message": "log message 2"
}
...
]
}
The data is additionally compressed with GZIP.
The code below will:
1) Gunzip the data
2) Parse the json
3) Set the result to ProcessingFailed for any record whose messageType is not DATA_MESSAGE, thus redirecting them to the
processing error output. Such records do not contain any log events. You can modify the code to set the result to
Dropped instead to get rid of these records completely.
4) For records whose messageType is DATA_MESSAGE, extract the individual log events from the logEvents field, and pass
each one to the transformLogEvent method. You can modify the transformLogEvent method to perform custom
transformations on the log events.
5) Concatenate the result from (4) together and set the result as the data of the record returned to Firehose. Note that
this step will not add any delimiters. Delimiters should be appended by the logic within the transformLogEvent
method.
6) Any additional records which exceed 6MB will be re-ingested back into Firehose.
*/
const zlib = require('zlib');
const AWS = require('aws-sdk');
/**
* logEvent has this format:
*
* {
* "id": "01234567890123456789012345678901234567890123456789012345",
* "timestamp": 1510109208016,
* "message": "log message 1"
* }
*
* The default implementation below just extracts the message and appends a newline to it.
*
* The result must be returned in a Promise.
*/
function transformLogEvent(logEvent) {
return Promise.resolve(`${logEvent.message}\n`);
}
function putRecordsToFirehoseStream(streamName, records, client, resolve, reject, attemptsMade, maxAttempts) {
client.putRecordBatch({
DeliveryStreamName: streamName,
Records: records,
}, (err, data) => {
const codes = [];
let failed = [];
let errMsg = err;
if (err) {
failed = records;
} else {
for (let i = 0; i < data.RequestResponses.length; i++) {
const code = data.RequestResponses[i].ErrorCode;
if (code) {
codes.push(code);
failed.push(records[i]);
}
}
errMsg = `Individual error codes: ${codes}`;
}
if (failed.length > 0) {
if (attemptsMade + 1 < maxAttempts) {
console.log('Some records failed while calling PutRecordBatch, retrying. %s', errMsg);
putRecordsToFirehoseStream(streamName, failed, client, resolve, reject, attemptsMade + 1, maxAttempts);
} else {
reject(`Could not put records after ${maxAttempts} attempts. ${errMsg}`);
}
} else {
resolve('');
}
});
}
function putRecordsToKinesisStream(streamName, records, client, resolve, reject, attemptsMade, maxAttempts) {
client.putRecords({
StreamName: streamName,
Records: records,
}, (err, data) => {
const codes = [];
let failed = [];
let errMsg = err;
if (err) {
failed = records;
} else {
for (let i = 0; i < data.Records.length; i++) {
const code = data.Records[i].ErrorCode;
if (code) {
codes.push(code);
failed.push(records[i]);
}
}
errMsg = `Individual error codes: ${codes}`;
}
if (failed.length > 0) {
if (attemptsMade + 1 < maxAttempts) {
console.log('Some records failed while calling PutRecords, retrying. %s', errMsg);
putRecordsToKinesisStream(streamName, failed, client, resolve, reject, attemptsMade + 1, maxAttempts);
} else {
reject(`Could not put records after ${maxAttempts} attempts. ${errMsg}`);
}
} else {
resolve('');
}
});
}
function createReingestionRecord(isSas, originalRecord) {
if (isSas) {
return {
Data: new Buffer(originalRecord.data, 'base64'),
PartitionKey: originalRecord.kinesisRecordMetadata.partitionKey,
};
} else {
return {
Data: new Buffer(originalRecord.data, 'base64'),
};
}
}
function getReingestionRecord(isSas, reIngestionRecord) {
if (isSas) {
return {
Data: reIngestionRecord.Data,
PartitionKey: reIngestionRecord.PartitionKey,
};
} else {
return {
Data: reIngestionRecord.Data,
};
}
}
exports.handler = (event, context, callback) => {
Promise.all(event.records.map(r => {
const buffer = new Buffer(r.data, 'base64');
const decompressed = zlib.gunzipSync(buffer);
const data = JSON.parse(decompressed);
// CONTROL_MESSAGE are sent by CWL to check if the subscription is reachable.
// They do not contain actual data.
if (data.messageType === 'CONTROL_MESSAGE') {
return Promise.resolve({
recordId: r.recordId,
result: 'Dropped',
});
} else if (data.messageType === 'DATA_MESSAGE') {
const promises = data.logEvents.map(transformLogEvent);
return Promise.all(promises)
.then(transformed => {
const payload = transformed.reduce((a, v) => a + v, '');
const encoded = new Buffer(payload).toString('base64');
return {
recordId: r.recordId,
result: 'Ok',
data: encoded,
};
});
} else {
return Promise.resolve({
recordId: r.recordId,
result: 'ProcessingFailed',
});
}
})).then(recs => {
const isSas = Object.prototype.hasOwnProperty.call(event, 'sourceKinesisStreamArn');
const streamARN = isSas ? event.sourceKinesisStreamArn : event.deliveryStreamArn;
const region = streamARN.split(':')[3];
const streamName = streamARN.split('/')[1];
const result = { records: recs };
let recordsToReingest = [];
const putRecordBatches = [];
let totalRecordsToBeReingested = 0;
const inputDataByRecId = {};
event.records.forEach(r => inputDataByRecId[r.recordId] = createReingestionRecord(isSas, r));
let projectedSize = recs.filter(rec => rec.result === 'Ok')
.map(r => r.recordId.length + r.data.length)
.reduce((a, b) => a + b);
// 6000000 instead of 6291456 to leave ample headroom for the stuff we didn't account for
for (let idx = 0; idx < event.records.length && projectedSize > 6000000; idx++) {
const rec = result.records[idx];
if (rec.result === 'Ok') {
totalRecordsToBeReingested++;
recordsToReingest.push(getReingestionRecord(isSas, inputDataByRecId[rec.recordId]));
projectedSize -= rec.data.length;
delete rec.data;
result.records[idx].result = 'Dropped';
// split out the record batches into multiple groups, 500 records at max per group
if (recordsToReingest.length === 500) {
putRecordBatches.push(recordsToReingest);
recordsToReingest = [];
}
}
}
if (recordsToReingest.length > 0) {
// add the last batch
putRecordBatches.push(recordsToReingest);
}
if (putRecordBatches.length > 0) {
new Promise((resolve, reject) => {
let recordsReingestedSoFar = 0;
for (let idx = 0; idx < putRecordBatches.length; idx++) {
const recordBatch = putRecordBatches[idx];
if (isSas) {
const client = new AWS.Kinesis({ region: region });
putRecordsToKinesisStream(streamName, recordBatch, client, resolve, reject, 0, 20);
} else {
const client = new AWS.Firehose({ region: region });
putRecordsToFirehoseStream(streamName, recordBatch, client, resolve, reject, 0, 20);
}
recordsReingestedSoFar += recordBatch.length;
console.log('Reingested %s/%s records out of %s in to %s stream', recordsReingestedSoFar, totalRecordsToBeReingested, event.records.length, streamName);
}
}).then(
() => {
console.log('Reingested all %s records out of %s in to %s stream', totalRecordsToBeReingested, event.records.length, streamName);
callback(null, result);
},
failed => {
console.log('Failed to reingest records. %s', failed);
callback(failed, null);
});
} else {
console.log('No records needed to be reingested.');
callback(null, result);
}
}).catch(ex => {
console.log('Error: ', ex);
callback(ex, null);
});
};
Here is code that will solve the problem:
__author__ = "Soumil Nitin Shah"
import json
import boto3
import base64

class MyHasher(object):
    def __init__(self, key):
        self.key = key

    def get(self):
        keys = str(self.key).encode("UTF-8")
        keys = base64.b64encode(keys)
        keys = keys.decode("UTF-8")
        return keys

def lambda_handler(event, context):
    output = []
    for record in event['records']:
        # Get the payload from the event and just use the data attribute
        payload = base64.b64decode(record['data'])
        serialize_payload = str(json.loads(payload)) + "\n"
        hasherHelper = MyHasher(key=serialize_payload)
        hash = hasherHelper.get()
        output_record = {
            'recordId': record['recordId'],
            'result': 'Ok',
            'data': hash
        }
        print("output_record", output_record)
        output.append(output_record)
    return {'records': output}

Duplicate existing contacts alert

In Vtiger 6.5.0 (open source), I want to create an alert function to warn users that the contact's mobile number already exists. Could you please help me? I'm a fresher.
Thanks,
Loi
You can refer to the function that already exists in the Accounts module for checking for a duplicate account name.
Follow these files and you will get an idea.
This is the code flow of how it's done in the Accounts module.
Registering the pre-save event:
http://code.vtiger.com/vtiger/vtigercrm/blob/master/layouts/vlayout/modules/Accounts/resources/Edit.js#L250
This is the function that checks for a duplicate in the cache and, if not found, calls the helper function:
http://code.vtiger.com/vtiger/vtigercrm/blob/master/layouts/vlayout/modules/Accounts/resources/Edit.js#L83
This is the helper function which makes the call to the server:
http://code.vtiger.com/vtiger/vtigercrm/blob/master/resources/helper.js#L166
This is the action function responsible for serving the request that came from the helper function:
http://code.vtiger.com/vtiger/vtigercrm/blob/master/modules/Accounts/actions/CheckDuplicate.php#L30
And this is the function which checks for the duplicate:
http://code.vtiger.com/vtiger/vtigercrm/blob/master/modules/Accounts/models/Record.php#L57
Hope this helps.
Hi Victor, please follow these steps:
modules\Leads\actions\Checkprimaryemail.php
<?php
class Leads_Checkprimaryemail_Action extends Vtiger_BasicAjax_Action {
public function checkPermission(Vtiger_Request $request) {
return;
}
public function process(Vtiger_Request $request) {
global $adb;
$moduleName = $request->get('module');
$recordId = $request->get('recordId');
$primary_email = $request->get('primary_email');
/*Lead Details*/
$lead_query = "select * from vtiger_leaddetails
inner join vtiger_crmentity on vtiger_crmentity.crmid=vtiger_leaddetails.leadid
where vtiger_crmentity.deleted = 0 and vtiger_leaddetails.email='".$primary_email."'";
$lead_result = $adb->query($lead_query);
$lead_email = $adb->query_result($lead_result,0,'email');
$lead_numrows = $adb->num_rows($lead_result);
/*Contact Details*/
$cont_query = "select * from vtiger_contactdetails
inner join vtiger_crmentity on vtiger_crmentity.crmid=vtiger_contactdetails.contactid
where vtiger_crmentity.deleted = 0 and vtiger_contactdetails.email='".$primary_email."'";
$cont_result = $adb->query($cont_query);
$cont_email = $adb->query_result($cont_result,0,'email');
$cont_numrows = $adb->num_rows($cont_result);
if($recordId != '' ){
if($primary_email == $lead_email && $lead_numrows == 1 ){
$emailtrue = 0;
} elseif($primary_email == $cont_email && $cont_numrows >= 1 ) {
$emailtrue = 1;
}
} else {
if(($lead_numrows >=1 || $cont_numrows >=1 ) || ($lead_numrows >=1 && $cont_numrows >= 1) ){
$emailtrue = 1;
} else {
$emailtrue = 0;
}
}
$emailData = array($emailtrue);
$response = new Vtiger_Response();
$response->setResult($emailData);
$response->emit();
}
}
?>
After that, create one other file:
layouts\vlayout\modules\Leads\resources\Edit.js
Vtiger_Edit_Js("Leads_Edit_Js", {
}, {
changeEvent: function (container) {
jQuery('input[name="email"]').on('focusout', function (e) {
var email = jQuery('input[name="email"]').val();
var recordId = jQuery('input[name="record"]').val();
var email_length = email.length;
if (email != '') {
if (email_length > 100) {
var errorMessage = app.vtranslate('JS_EMAIL_LENGTH_VALIDATION');
params = {
text: errorMessage,
'type': 'error',
};
Vtiger_Helper_Js.showMessage(params);
}
var progressIndicatorElement = jQuery.progressIndicator({
'position': 'html',
'blockInfo': {
'enabled': true
}
});
var postData = {
"module": 'Leads',
"action": "Checkprimaryemail",
"primary_email": email,
"recordId": recordId
}
AppConnector.request(postData).then(
function (data) {
progressIndicatorElement.progressIndicator({'mode': 'hide'});
if (data['result'] == 1) {
jQuery('#emailalready_exists').val(1);
var errorMessage = app.vtranslate('JS_EMAIL_EXIST');
params = {
text: errorMessage,
'type': 'error',
};
Vtiger_Helper_Js.showMessage(params);
} else {
jQuery('#emailalready_exists').val(0);
}
},
function (error, err) {}
);
e.preventDefault();
}
});
},
registerBasicEvents: function (container) {
this._super(container);
this.changeEvent(container);
}
});
To check duplicate records in vTiger, follow the steps below:
Register the checkDuplicate function in registerBasicEvents.
1: \layouts\vlayout\modules\Contacts\resources\Edit.js
getmobile : function(container){
return jQuery('input[name="mobile"]',container).val();
},
getRecordId : function(container){
return jQuery('input[name="record"]',container).val();
},
DuplicateCheck : function(form) {
var thisInstance = this;
if(typeof form == 'undefined') {
form = this.getForm();
}
jQuery( "#mobileFieldId" ).change(function() {
var mobile = thisInstance.getmobile(form);
var recordId = thisInstance.getRecordId(form);
var params = {
'module' : "Contacts",
'action' : "CheckDuplicate",
'mobile' : mobile,
'record' : recordId
}
AppConnector.request(params).then(
function(data) {
var response = data['result'];
var result = response['success'];
if(result == true) {
var message_params = {
title : app.vtranslate('JS_MESSAGE'),
text: response['message'],
animation: 'show',
type: 'error'
};
Vtiger_Helper_Js.showPnotify(message_params);
jQuery(".btn-success").attr('disabled',true);
return false;
} else {
jQuery(".btn-success").attr('disabled',false);
}
}
);
});
},
2: Create a new file at \modules\Contacts\actions\CheckDuplicate.php
Follow the same process/code as given in \modules\Accounts\actions\CheckDuplicate.php
3: Add a new function checkDuplicate() in \modules\Contacts\models\Record.php
And follow the same process as in \modules\Accounts\models\Record.php, which has a checkDuplicate() function.
Note: Don't forget to change the DB table name and class name module-wise.
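For step 2, a rough illustration (a hypothetical sketch modeled on the Leads_Checkprimaryemail_Action shown above, not the actual Accounts code; field and table names may differ in your installation):

<?php
class Contacts_CheckDuplicate_Action extends Vtiger_BasicAjax_Action {
    public function checkPermission(Vtiger_Request $request) {
        return;
    }
    public function process(Vtiger_Request $request) {
        global $adb;
        $mobile = $request->get('mobile');
        $recordId = $request->get('record');
        // Count non-deleted contacts with the same mobile, excluding the record being edited
        $query = "SELECT count(*) AS cnt FROM vtiger_contactdetails
                  INNER JOIN vtiger_crmentity ON vtiger_crmentity.crmid = vtiger_contactdetails.contactid
                  WHERE vtiger_crmentity.deleted = 0 AND vtiger_contactdetails.mobile = ?
                  AND vtiger_contactdetails.contactid != ?";
        $result = $adb->pquery($query, array($mobile, $recordId));
        $count = $adb->query_result($result, 0, 'cnt');
        // Shape matches what the Edit.js above reads: result.success and result.message
        $response = new Vtiger_Response();
        $response->setResult(array(
            'success' => ($count > 0),
            'message' => 'Mobile number already exists',
        ));
        $response->emit();
    }
}
?>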
Hope this will help you. Thank you.

Dart: How to test if stream emits elements at a certain time?

I'm trying to test a function Stream transform(Stream input). How can I test whether the returned stream emits elements at certain times?
In RxJS (JavaScript) I can use a TestScheduler to emit elements on the input stream at a certain time and test if they are emitted on the output stream at a certain time. In this example, the transform function is passed to scheduler.startWithCreate:
var scheduler = new Rx.TestScheduler();
// Create hot observable which will start firing
var xs = scheduler.createHotObservable(
onNext(150, 1),
onNext(210, 2),
onNext(220, 3),
onCompleted(230)
);
// Note we'll start at 200 for subscribe, hence missing the 150 mark
var res = scheduler.startWithCreate(function () {
return xs.map(function (x) { return x * x });
});
// Implement collection assertion
collectionAssert.assertEqual(res.messages, [
onNext(210, 4),
onNext(220, 9),
onCompleted(230)
]);
// Check for subscribe/unsubscribe
collectionAssert.assertEqual(xs.subscriptions, [
subscribe(200, 230)
]);
Update: Released my code as a package called stream_test_scheduler.
This code works similar to TestScheduler in RxJS, but it uses real time (milliseconds) instead of virtual time, since you can't fake time in Dart (see Irn's comment). You can pass a maximum deviation to the matcher. I use 20 milliseconds in this example. But deviation varies. You might have to use a different maximum deviation value for another test or on another (faster/slower) system.
Edit: I changed the example to a delay transform function, which is a shorter (fewer parameters, less configurable) version of the delay function from the stream_ext package. The test checks whether elements are delayed by one second.
import 'dart:async';
import 'package:test/test.dart';
// To test:
/// Modified version of <https://github.com/theburningmonk/stream_ext/wiki/delay>
Stream delay(Stream input, Duration duration) {
var controller = new StreamController.broadcast(sync : true);
delayCall(Function f, [Iterable args]) => args == null
? new Timer(duration, f)
: new Timer(duration, () => Function.apply(f, args));
input.listen(
(x) => delayCall(_tryAdd, [controller, x]),
onError : (ex) => delayCall(_tryAddError, [controller, ex]),
onDone : () => delayCall(_tryClose, [controller])
);
return controller.stream;
}
_tryAdd(StreamController controller, event) {
if (!controller.isClosed) controller.add(event);
}
_tryAddError(StreamController controller, err) {
if (!controller.isClosed) controller.addError(err);
}
_tryClose(StreamController controller) {
if (!controller.isClosed) controller.close();
}
main() async {
test('delay preserves relative time intervals between the values', () async {
var scheduler = new TestScheduler();
var source = scheduler.createStream([
onNext(150, 1),
onNext(210, 2),
onNext(220, 3),
onCompleted(230)
]);
var result = await scheduler.startWithCreate(() => delay(source, ms(1000)));
expect(result, equalsRecords([
onNext(1150, 1),
onNext(1210, 2),
onNext(1220, 3),
onCompleted(1230)
], maxDeviation: 20));
});
}
equalsRecords(List<Record> records, {int maxDeviation: 0}) {
return pairwiseCompare(records, (Record r1, Record r2) {
var deviation = (r1.ticks.inMilliseconds - r2.ticks.inMilliseconds).abs();
if (deviation > maxDeviation) {
return false;
}
if (r1 is OnNextRecord && r2 is OnNextRecord) {
return r1.value == r2.value;
}
if (r1 is OnErrorRecord && r2 is OnErrorRecord) {
return r1.exception == r2.exception;
}
return (r1 is OnCompletedRecord && r2 is OnCompletedRecord);
}, 'equal with deviation of ${maxDeviation}ms to');
}
class TestScheduler {
final SchedulerTasks _tasks;
TestScheduler() : _tasks = new SchedulerTasks();
Stream createStream(List<Record> records) {
final controller = new StreamController(sync: true);
_tasks.add(controller, records);
return controller.stream;
}
Future<List<Record>> startWithCreate(Stream createStream()) {
final completer = new Completer<List<Record>>();
final records = <Record>[];
final start = new DateTime.now();
int timeStamp() {
final current = new DateTime.now();
return current.difference(start).inMilliseconds;
}
createStream().listen(
(event) => records.add(onNext(timeStamp(), event)),
onError: (exception) => records.add(onError(timeStamp(), exception)),
onDone: () {
records.add(onCompleted(timeStamp()));
completer.complete(records);
}
);
_tasks.run();
return completer.future;
}
}
class SchedulerTasks {
Map<Record, StreamController> _controllers = {};
List<Record> _records = [];
void add(StreamController controller, List<Record> records) {
for (var record in records) {
_controllers[record] = controller;
}
_records.addAll(records);
}
void run() {
_records.sort();
for (var record in _records) {
final controller = _controllers[record];
new Future.delayed(record.ticks, () {
if (record is OnNextRecord) {
controller.add(record.value);
} else if (record is OnErrorRecord) {
controller.addError(record.exception);
} else if (record is OnCompletedRecord) {
controller.close();
}
});
}
}
}
onNext(int ticks, int value) => new OnNextRecord(ms(ticks), value);
onCompleted(int ticks) => new OnCompletedRecord(ms(ticks));
onError(int ticks, exception) => new OnErrorRecord(ms(ticks), exception);
Duration ms(int milliseconds) => new Duration(milliseconds: milliseconds);
abstract class Record implements Comparable {
final Duration ticks;
Record(this.ticks);
@override
int compareTo(other) => ticks.compareTo(other.ticks);
}
class OnNextRecord extends Record {
final value;
OnNextRecord(Duration ticks, this.value) : super (ticks);
@override
String toString() => 'onNext($value)#${ticks.inMilliseconds}';
}
class OnErrorRecord extends Record {
final exception;
OnErrorRecord(Duration ticks, this.exception) : super (ticks);
@override
String toString() => 'onError($exception)#${ticks.inMilliseconds}';
}
class OnCompletedRecord extends Record {
OnCompletedRecord(Duration ticks) : super (ticks);
@override
String toString() => 'onCompleted()#${ticks.inMilliseconds}';
}