how to impliment Haystacksearch fetched autocomplete - django

I want implement fetching in autocomplete, here is my autocomplete function
def autocomplete(request):
fetch_field = request.GET.get('fetch_field')
sqs = SearchQuerySet().autocomplete(
content_auto=request.GET.get(
'query',
''))[
:5]
s = []
for result in sqs:
d = {"value": result.title, "data": result.object.slug}
s.append(d)
output = {'suggestions': s}
print('hihi' ,output)
return JsonResponse(output)
Now I can get fetch fields but I don't know how to fetch with SearchQuerySet.

sqs = SearchQuerySet().filter(field_want_to_fetch = fetch_field ).autocomplete(
content_auto=request.GET.get(
'query',
''))[
:5]
Use this !!

Related

Power BI Iterative API Loop

I am attempting (and can successfully do so) to connect to an API and loop through several iterations of the API call in order to grab the next_page value, put it in a list and then call the list.
Unfortunately, when this is published to the PBI service I am unable to refresh there and indeed 'Data Source Settings' tells me I have a 'hand-authored query'.
I have attempted to follow Chris Webbs' blog post around the usage of query parameters and relative path, but if I use this I just get a constant loop of the first page that's hit.
The Start Epoch Time is a helper to ensure I only grab data less than 3 months old.
let
iterations = 10000, // Number of MAXIMUM iterations
url = "https://www.zopim.com/api/v2/" & "incremental/" & "chats?fields=chats(*)" & "&start_time=" & Number.ToText( StartEpochTime ),
FnGetOnePage =
(url) as record =>
let
Source1 = Json.Document(Web.Contents(url, [Headers=[Authorization="Bearer MY AUTHORIZATION KEY"]])),
data = try Source1[chats] otherwise null, //get the data of the first page
next = try Source1[next_page] otherwise null, // the script ask if there is another page*//*
res = [Data=data, Next=next]
in
res,
GeneratedList =
List.Generate(
()=>[i=0, res = FnGetOnePage(url)],
each [i]<iterations and [res][Data]<>null,
each [i=[i]+1, res = FnGetOnePage([res][Next])],
each [res][Data])
Lookups
If Source1 exists, but [chats] may not, you can simplify
= try Source1[chats] otherwise null
to
= Source1[chats]?
Plus it you don't lose non-lookup errors.
m-spec-operators
Chris Web Method
should be something closer to this.
let
Headers = [
Accept="application/json"
],
BaseUrl = "https://www.zopim.com", // very important
Options = [
RelativePath = "api/v2/incremental/chats",
Headers = [
Accept="application/json"
],
Query = [
fields = "chats(*)",
start_time = Number.ToText( StartEpocTime )
],
Response = Web.Contents(BaseUrl, Options),
Result = Json.Document(Response) // skip if it's not JSON
in
Result
Here's an example of a reusable Web.Contents function
helper function
let
/*
from: <https://github.com/ninmonkey/Ninmonkey.PowerQueryLib/blob/master/source/WebRequest_Simple.pq>
Wrapper for Web.Contents returns response metadata
for options, see: <https://learn.microsoft.com/en-us/powerquery-m/web-contents#__toc360793395>
Details on preventing "Refresh Errors", using 'Query' and 'RelativePath':
- Not using Query and Relative path cause refresh errors:
<https://blog.crossjoin.co.uk/2016/08/23/web-contents-m-functions-and-dataset-refresh-errors-in-power-bi/>
- You can opt-in to Skip-Test:
<https://blog.crossjoin.co.uk/2019/04/25/skip-test-connection-power-bi-refresh-failures/>
- Debugging and tracing the HTTP requests
<https://blog.crossjoin.co.uk/2019/11/17/troubleshooting-web-service-refresh-problems-in-power-bi-with-the-power-query-diagnostics-feature/>
update:
- MaybeErrResponse: Quick example of parsing an error result.
- Raw text is returned, this is useful when there's an error
- now response[json] does not throw, when the data isn't json to begin with (false errors)
*/
WebRequest_Simple
= (
base_url as text,
optional relative_path as nullable text,
optional options as nullable record
)
as record =>
let
headers = options[Headers]?, //or: ?? [ Accept = "application/json" ],
merged_options = [
Query = options[Query]?,
RelativePath = relative_path,
ManualStatusHandling = options[ManualStatusHandling]? ?? { 400, 404, 406 },
Headers = headers
],
bytes = Web.Contents(base_url, merged_options),
response = Binary.Buffer(bytes),
response_metadata = Value.Metadata( bytes ),
status_code = response_metadata[Response.Status]?,
response_text = Text.Combine( Lines.FromBinary(response,null,null, TextEncoding.Utf8), "" ),
json = Json.Document(response),
IsJsonX = not (try json)[HasError],
Final = [
request_url = metadata[Content.Uri](),
response_text = response_text,
status_code = status_code,
metadata = response_metadata,
IsJson = IsJsonX,
response = response,
json = if IsJsonX then json else null
]
in
Final,
tests = {
WebRequest_Simple("https://httpbin.org", "json"), // expect: json
WebRequest_Simple("https://www.google.com"), // expect: html
WebRequest_Simple("https://httpbin.org", "/headers"),
WebRequest_Simple("https://httpbin.org", "/status/codes/406"), // exect 404
WebRequest_Simple("https://httpbin.org", "/status/406"), // exect 406
WebRequest_Simple("https://httpbin.org", "/get", [ Text = "Hello World"])
},
FinalResults = Table.FromRecords(tests,
type table[
status_code = Int64.Type, request_url = text,
metadata = record,
response_text = text,
IsJson = logical, json = any,
response = binary
],
MissingField.Error
)
in
FinalResults

Properly return a label in post-annotation lambda for AWS SageMaker Ground Truth custom labeling job

I'm working on a SageMaker labeling job with custom datatypes. For some reason though, I'm not getting the correct label in the AWS web console. It should have the selected label which is "Native", but instead, I'm getting the <labelattributename> which is "new-test-14".
After Ground Truth runs the post-annotation lambda, it seems to modify the metadata before returning a data object. The data object it returns doesn't contain a class-name key inside the metadata attribute, even when I hard-code the lambda to return an object that contains it.
My manifest file looks like this:
{"source-ref" : "s3://<file-name>", "text" : "Hello world"}
{"source-ref" : "s3://"<file-name>", "text" : "Hello world"}
And the worker response looks like this:
{"answers":[{"acceptanceTime":"2021-05-18T16:08:29.473Z","answerContent":{"new-test-14":{"label":"Native"}},"submissionTime":"2021-05-18T16:09:15.960Z","timeSpentInSeconds":46.487,"workerId":"private.us-east-1.ea05a03fcd679cbb","workerMetadata":{"identityData":{"identityProviderType":"Cognito","issuer":"https://cognito-idp.us-east-1.amazonaws.com/us-east-1_XPxQ9txEq","sub":"edc59ce1-e09d-4551-9e0d-a240465ea14a"}}}]}
That worker response gets processed by my post-annotation lambda which is modeled after this aws sample ground truth recipe. Here's my code:
import json
import sys
import boto3
from datetime import datetime
def lambda_handler(event, context):
# Event received
print("Received event: " + json.dumps(event, indent=2))
labeling_job_arn = event["labelingJobArn"]
label_attribute_name = event["labelAttributeName"]
label_categories = None
if "label_categories" in event:
label_categories = event["labelCategories"]
print(" Label Categories are : " + label_categories)
payload = event["payload"]
role_arn = event["roleArn"]
output_config = None # Output s3 location. You can choose to write your annotation to this location
if "outputConfig" in event:
output_config = event["outputConfig"]
# If you specified a KMS key in your labeling job, you can use the key to write
# consolidated_output to s3 location specified in outputConfig.
# kms_key_id = None
# if "kmsKeyId" in event:
# kms_key_id = event["kmsKeyId"]
# # Create s3 client object
# s3_client = S3Client(role_arn, kms_key_id)
s3_client = boto3.client('s3')
# Perform consolidation
return do_consolidation(labeling_job_arn, payload, label_attribute_name, s3_client)
def do_consolidation(labeling_job_arn, payload, label_attribute_name, s3_client):
"""
Core Logic for consolidation
:param labeling_job_arn: labeling job ARN
:param payload: payload data for consolidation
:param label_attribute_name: identifier for labels in output JSON
:param s3_client: S3 helper class
:return: output JSON string
"""
# Extract payload data
if "s3Uri" in payload:
s3_ref = payload["s3Uri"]
payload_bucket, payload_key = s3_ref.split('/',2)[-1].split('/',1)
payload = json.loads(s3_client.get_object(Bucket=payload_bucket, Key=payload_key)['Body'].read())
# print(payload)
# Payload data contains a list of data objects.
# Iterate over it to consolidate annotations for individual data object.
consolidated_output = []
success_count = 0 # Number of data objects that were successfully consolidated
failure_count = 0 # Number of data objects that failed in consolidation
for p in range(len(payload)):
response = None
dataset_object_id = payload[p]['datasetObjectId']
log_prefix = "[{}] data object id [{}] :".format(labeling_job_arn, dataset_object_id)
print("{} Consolidating annotations BEGIN ".format(log_prefix))
annotations = payload[p]['annotations']
# print("{} Received Annotations from all workers {}".format(log_prefix, annotations))
# Iterate over annotations. Log all annotation to your CloudWatch logs
annotationsFromAllWorkers = []
for i in range(len(annotations)):
worker_id = annotations[i]["workerId"]
anotation_data = annotations[i]["annotationData"]
annotation_content = anotation_data["content"]
annotation_content_json = json.loads(annotation_content)
annotation_job = annotation_content_json["new_test"]
annotation_label = annotation_job["label"]
consolidated_annotation= {
"workerId": worker_id,
"annotationData": {
"content": {
"annotatedResult": {
"instances": [{"label":annotation_label }]
}
}
}
}
annotationsFromAllWorkers.append(consolidated_annotation)
consolidated_annotation = {"annotationsFromAllWorkers": annotationsFromAllWorkers} # TODO : Add your consolidation logic
# Build consolidation response object for an individual data object
response = {
"datasetObjectId": dataset_object_id,
"consolidatedAnnotation": {
"content": {
label_attribute_name: consolidated_annotation,
label_attribute_name+ "-metadata": {
"class-name": "Native",
"confidence": 0.00,
"human-annotated": "yes",
"creation-date": datetime.strftime(datetime.now(), "%Y-%m-%dT%H:%M:%S"),
"type": "groundtruth/custom"
}
}
}
}
success_count += 1
# print("{} Consolidating annotations END ".format(log_prefix))
# Append individual data object response to the list of responses.
if response is not None:
consolidated_output.append(response)
failure_count += 1
print(" Consolidation failed for dataobject {}".format(p))
print(" Unexpected error: Consolidation failed." + str(sys.exc_info()[0]))
print("Consolidation Complete. Success Count {} Failure Count {}".format(success_count, failure_count))
print(" -- Consolidated Output -- ")
print(consolidated_output)
print(" ------------------------- ")
return consolidated_output
As you can see above, the do_consolidation method returns an object hard-coded to include a class-name of "Native", and the lambda_handler method returns that same object. Here's the post-annotation function response:
[{
"datasetObjectId": "4",
"consolidatedAnnotation": {
"content": {
"new-test-14": {
"annotationsFromAllWorkers": [{
"workerId": "private.us-east-1.ea05a03fcd679cbb",
"annotationData": {
"content": {
"annotatedResult": {
"instances": [{
"label": "Native"
}]
}
}
}
}]
},
"new-test-14-metadata": {
"class-name": "Native",
"confidence": 0,
"human-annotated": "yes",
"creation-date": "2021-05-19T07:06:06",
"type": "groundtruth/custom"
}
}
}
}]
As you can see, the post-annotation function return value has the class-name of "Native" in the metadata so I would expect the class-name to be present in the data object metadata, but it's not. And here's a screenshot of the data object summary:
It seems like Ground Truth overwrote the metadata, and now the object doesn't contain the correct label. I think perhaps that's why my label is coming through as the label attribute name "new-test-14" instead of as the correct label "Native". Here's a screenshot of the labeling job in the AWS web console:
The web console is supposed to show the label "Native" inside the "Label" column but instead I'm getting the <labelattributename> "new-test-14" in the label column.
Here is the output.manifest file generated by Ground Truth at the end:
{
"source-ref": "s3://<file-name>",
"text": "Hello world",
"new-test-14": {
"annotationsFromAllWorkers": [{
"workerId": "private.us-east-1.ea05a03fcd679ert",
"annotationData": {
"content": {
"annotatedResult": {
"label": "Native"
}
}
}
}]
},
"new-test-14-metadata": {
"type": "groundtruth/custom",
"job-name": "new-test-14",
"human-annotated": "yes",
"creation-date": "2021-05-18T12:34:17.400000"
}
}
What should I return from the Post-Annotation function? Am I missing something in my response? How do I get the proper label to appear in the AWS web console?

Bittrex REST API for Python, I want to create an order using API v3 https://api.bittrex.com/v3/orders

I need help to create orders using the bittrex version 3 REST API. I have the code below and I can't understand what is missing to work.
I can make other GET calls, but I cannot make this POST request.
I don't know how to deal with the passing of parameters.
Official documentation at https://bittrex.github.io/api/v3#tag-Orders.
def NewOrder(market, amount, price):
#print 'open sell v3', market
market = 'HEDG-BTC'#'BTC-'+market
uri = 'https://api.bittrex.com/v3/orders?'
params = {
'marketSymbol': 'BTC-HEDG',#'HEDG-BTC', #market
'direction': 'BUY',
'type': 'LIMIT',
'quantity': amount,
'limit': price,
'timeInForce': 'POST_ONLY_GOOD_TIL_CANCELLED',
'useAwards': True
}
timestamp = str(int(time.time()*1000))
Content = ""
contentHash = hashlib.sha512(Content.encode()).hexdigest()
Method = 'POST'
uri2 = buildURI(uri, params)
#uri2 = 'https://api.bittrex.com/v3/orders?direction=BUY&limit=0.00021&marketSymbol=HEDG-BTC&quantity=1.1&timeInForce=POST_ONLY_GOOD_TIL_CANCELLED&type=LIMIT&useAwards=True'
#print uri2
PreSign = timestamp + uri2 + Method + contentHash# + subaccountId
#print PreSign
Signature = hmac.new(apisecret, PreSign.encode(), hashlib.sha512).hexdigest()
headers = {
'Api-Key' : apikey,
'Api-Timestamp' : timestamp,
'Api-Content-Hash': contentHash,
'Api-Signature' : Signature
}
r = requests.post(uri2, data={}, headers=headers, timeout=11)
return json.loads(r.content)
NewOrder('HEDG', 1.1, 0.00021)
And my error message:
{u'code': u'BAD_REQUEST', u'data': {u'invalidRequestParameter': u'direction'}, u'detail': u'Refer to the data field for specific field validation failures.'}
It seems from the documentation that this body is expected by the api as json data:
{
"marketSymbol": "string",
"direction": "string",
"type": "string",
"quantity": "number (double)",
"ceiling": "number (double)",
"limit": "number (double)",
"timeInForce": "string",
"clientOrderId": "string (uuid)",
"useAwards": "boolean"
}
and you are setting these values as url params that's the issue.
you need to do this:
uri = 'https://api.bittrex.com/v3/orders'
# NOTE >>>> please check that you provide all the required fields.
payload = {
'marketSymbol': 'BTC-HEDG',#'HEDG-BTC', #market
'direction': 'BUY',
'type': 'LIMIT',
'quantity': amount,
'limit': price,
'timeInForce': 'POST_ONLY_GOOD_TIL_CANCELLED',
'useAwards': True
}
# do rest of the stuffs as you are doing
# post payload as json data with the url given in doc
r = requests.post(uri, json=payload, headers=headers, timeout=11)
print(r.json())
If you still have issues let us know. If it works then please mark answer as accepted.
Hope this helps.
I made the following modifications to the code but started to give error in 'Content-Hash'
I'm assuming that some parameters are optional so they are commented.
def NewOrder(market, amount, price):
market = 'BTC-'+market
uri = 'https://api.bittrex.com/v3/orders'
payload = {
'marketSymbol': market,
'direction': 'BUY',
'type': 'LIMIT',
'quantity': amount,
#"ceiling": "number (double)",
'limit': price,
'timeInForce': 'POST_ONLY_GOOD_TIL_CANCELLED',
#"clientOrderId": "string (uuid)",
'useAwards': True
}
#ceiling (optional, must be included for ceiling orders and excluded for non-ceiling orders)
#clientOrderId (optional) client-provided identifier for advanced order tracking
timestamp = str(int(time.time()*1000))
Content = ''+json.dumps(payload, separators=(',',':'))
print Content
contentHash = hashlib.sha512(Content.encode()).hexdigest()
Method = 'POST'
#uri2 = buildURI(uri, payload)#line not used
print uri
#PreSign = timestamp + uri2 + Method + contentHash# + subaccountId
PreSign = timestamp + uri + Method + contentHash# + subaccountId
print PreSign
Signature = hmac.new(apisecret, PreSign.encode(), hashlib.sha512).hexdigest()
headers = {
'Api-Key' : apikey,
'Api-Timestamp' : timestamp,
'Api-Content-Hash': contentHash,
'Api-Signature' : Signature
}
r = requests.post(uri, json=payload, headers=headers, timeout=11)
print(r.json())
return json.loads(r.content)
NewOrder('HEDG', 1.5, 0.00021)
{u'code': u'INVALID_CONTENT_HASH'}
Bittrex API via requests package PYTHON
import hmac
import hashlib
import time, requests
nonce = str(int(time.time() * 1000))
content_hash = hashlib.sha512(''.encode()).hexdigest()
signature = hmac.new(
'<SECRET_KEY>'.encode(),
''.join([nonce, url, 'GET', content_hash]).encode(),
hashlib.sha512
).hexdigest()
headers = {
'Api-Timestamp': nonce,
'Api-Key': '<API_KEY>',
'Content-Type': 'application/json',
'Api-Content-Hash': content_hash,
'Api-Signature': signature
}
result = requests.get(url=url, headers=headers)

How to retrieve multiple items from Dynamo DB using AWS lambda

How to get multiple items from DB. the below code throws me an error as it fetches only one item. I am retrieving the items based on email value.
import json
import os
import boto3
import decimalencoder
dynamodb = boto3.resource('dynamodb')
def get(event, context):
table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])
# fetch a person from the database
result = table.get_item(
Key={
'email': event['pathParameters']['email']
}
)
# create a response
response = {
"statusCode": 200,
"body": json.dumps(result['Item'], cls=decimalencoder.DecimalEncoder),
"headers": {
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Credentials": "true"
}
}
return response
To retrive multiple rows from db, first query on id you want data to be filtered.
Then maintain a list to store all row values in it.
def lambda_handler(event,context):
item = table.query(
KeyConditionExpression=Key('hubID').eq(hubId)
)
if (item["Count"] == 0):
response = {"msg": "Item not exist, can't perform READ"}
else:
i = 1
lst = []
while i < item["Count"]:
response = {
"hubId" : item["Items"][i]["hubID"],
"deviceState": int(item["Items"][i]["deviceState"]),
"deviceId": item["Items"][i]["deviceID"],
"deviceType": item["Items"][i]["deviceType"],
"intensity": int(item["Items"][i]["intensity"])
}
lst.append(response)
i += 1
print(lst)
response = lst
return response

in tastypie, how can i set name for json result

In tastypie, I want set json result name.
I have a class that I use for it but I can set name in.
enter cclass ContentResource(ModelResource):
class Meta:
results = ListField(attribute='results')
queryset = Content.objects.all()
resource_name = 'content'
max_limit = None
#filtering = {"title": "contains"}
def alter_list_data_to_serialize(self, request, data_dict):
if isinstance(data_dict, dict):
if 'meta' in data_dict:
# Get rid of the "meta".
del(data_dict['meta'])
# Rename the objects.
data_dict['Mobile'] = data_dict['objects']
del(data_dict['objects'])
return data_dict
ode here it returns this
{"Mobile":
[
{
"added": "2015-07-23T11:30:20.911835",
"content_cast": "",
"content_company": "HamrahCinema",
"content_description": "so nice",
"content_director": "",
"content_duration": "2:20",
"content_filelanguage": null,
}
]
}
when I use /content/api/content every thing is ok, but when I use /content/api/content/1,"mobile" is removed.
as educated guess, I would suggest using alter_detail_data_to_serialize