Python script for creating Spaces on Confluence Cloud - confluence-rest-api

I have over 700 projects for which I have to create Spaces on Confluence Cloud. Doing this manually is not feasible, so I have to come up with a script or tool that reads the information from a text file and creates the spaces. I have a programming background in Python and Java. Can someone suggest a solution for achieving this, please?
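As a starting point, a minimal sketch that reads one space name per line from a plain text file and posts it to the Confluence Cloud space endpoint could look like the following (the domain, credentials, and file name are placeholders, and the space key is simply derived from the name here):

import requests
from requests.auth import HTTPBasicAuth

BASE_URL = "https://your-domain.atlassian.net/wiki/rest/api/space"
AUTH = HTTPBasicAuth("you@example.com", "your-api-token")
HEADERS = {"Accept": "application/json", "Content-Type": "application/json"}

with open("spaces.txt") as f:          # one space name per line
    for line in f:
        name = line.strip()
        if not name:
            continue
        key = name.replace(" ", "")    # Confluence space keys are alphanumeric
        body = {
            "key": key,
            "name": name,
            "description": {"plain": {"value": name, "representation": "plain"}}
        }
        resp = requests.post(BASE_URL, json=body, headers=HEADERS, auth=AUTH)
        print(key, resp.status_code)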

Here is the code snippet for reading the space and page names from Excel and creating them in Confluence Cloud:
import requests
import json
from requests.auth import HTTPBasicAuth
from openpyxl import load_workbook
from collections import defaultdict

filename = "path/PjtExport.xlsx"
wb = load_workbook(filename)
sheet = wb["Sheet1"]
confdict = defaultdict(list)

for i1 in range(2, 10):
    if str(sheet.cell(row=i1, column=1).value) != "None":
        spaceName = str(sheet.cell(row=i1, column=1).value)
        keyName = spaceName.replace(" ", "")
        # print(keyName)
        if keyName not in confdict:          # skip space names already processed
            confdict.setdefault(keyName, spaceName)
            # print(spaceName)

            # Create the space
            url = 'https://domain.atlassian.net/wiki/rest/api/space'
            auth = HTTPBasicAuth('xxx#yyy.com', 'abcabcabc')
            headers = {
                "Accept": "application/json",
                "Content-Type": "application/json"
            }
            payload = json.dumps({
                "key": keyName,
                "name": spaceName,
                "description": {
                    "plain": {
                        "value": "Test space created in Python - XL Int",
                        "representation": "storage"
                    }
                },
            })
            response = requests.request(
                "POST",
                url,
                data=payload,
                headers=headers,
                auth=auth
            )
            res_data = response.json()
            # Homepage id of the newly created space, used as the copy destination
            spaceID = str(res_data["homepage"]["id"])

            # Copy a page hierarchy into the new space
            pageName = str(sheet.cell(row=i1, column=2).value)
            url = "https://domain.atlassian.net/wiki/rest/api/content/aaaa/pagehierarchy/copy"
            auth = HTTPBasicAuth('xxx#yyy.com', 'abcabcabc')
            headers = {
                "Content-Type": "application/json"
            }
            payload = json.dumps({
                "copyAttachments": True,
                "copyPermissions": True,
                "copyProperties": True,
                "copyLabels": True,
                "copyCustomContents": True,
                "destinationPageId": spaceID,
                "titleOptions": {
                    "prefix": pageName + " ",
                    "replace": "",
                    "search": ""
                }
            })
            response = requests.request(
                "POST",
                url,
                data=payload,
                headers=headers,
                auth=auth
            )
            page_data = response.json()
            # spaceID = str(res_data["homepage"]["id"])
            print(page_data)
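With roughly 700 spaces it is worth checking each response before attempting the hierarchy copy, since a failed space creation leaves nothing to copy into. A small sketch along those lines, reusing the space-creation url, headers and auth from the snippet above (the back-off on HTTP 429 is an arbitrary choice, not something the Confluence API mandates):

import time

def create_space(key, name):
    """Create one space and return its homepage id, or None if creation failed."""
    body = json.dumps({"key": key, "name": name})
    resp = requests.post(url, data=body, headers=headers, auth=auth)
    if resp.status_code == 429:      # throttled by Confluence Cloud - wait and retry once
        time.sleep(10)
        resp = requests.post(url, data=body, headers=headers, auth=auth)
    if not resp.ok:
        print("Space creation failed:", key, resp.status_code, resp.text)
        return None
    return str(resp.json()["homepage"]["id"])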

Related

cannot post a new DB to apache-superset, 400 Error with CSRF

I am unsure how I can set the CSRF token so that Apache Superset is happy. Here is my code:
# Create a DB Connection for Apache Superset
import requests
import json

base_url = "http://SOMEIP:8088/"
login_url = base_url + "api/v1/security/login"
csrf_url = base_url + "api/v1/security/csrf_token"

def create_db_connection(session, db_ip, db_port, db_user, db_pass, db_name):
    url = base_url + "api/v1/database/"
    sqlalchemy_url = "postgresql://" + db_user + ":" + db_pass + "@" + db_ip + ":" + str(db_port) + "/" + db_name
    data_out = {
        "allow_csv_upload": True,
        "allow_ctas": True,
        "allow_cvas": True,
        "allow_dml": True,
        "allow_multi_schema_metadata_fetch": True,
        "allow_run_async": True,
        "cache_timeout": 0,
        "database_name": db_name,
        "expose_in_sqllab": True,
        "impersonate_user": True,
        "sqlalchemy_uri": sqlalchemy_url
    }
    response = session.post(url=url, headers=dict(Referrer=login_url), json=data_out)
    response.raise_for_status()
    return response

if __name__ == "__main__":
    username = "mysupersetuser"
    password = "mysupersetpass"
    db_user = "MYUSER"
    db_password = "MYDBPASS"
    db_host = "MYPOSTGRESIP"
    db_port = 5432
    db_name = "MYDBNAME"

    session = requests.session()
    login_url = base_url + "api/v1/security/login"
    login_data = {
        "password": password,
        "provider": "db",
        "refresh": False,
        "username": username
    }
    response = session.post(url=login_url, json=login_data)
    response.raise_for_status()

    head = {
        "Authorization": "Bearer " + json.loads(response.text)['access_token']
    }
    response = session.get(csrf_url, headers=head)
    response.raise_for_status()

    response = create_db_connection(session, db_ip=db_host, db_port=db_port, db_user=db_user, db_pass=db_password, db_name=db_name)
    print(str(response.text))
I see multiple ways people suggest setting the CSRF token, but none have worked so far.
It's always the same 400 response:
'{"errors": [{"message": "400 Bad Request: The CSRF token is missing.", "error_type": "GENERIC_BACKEND_ERROR", "level": "error", "extra": {"issue_codes": [{"code": 1011, "message": "Issue 1011 - Superset encountered an unexpected error."}]}}]}'
It looks like Apache Superset uses Flask-AppBuilder, which uses Flask-WTF. You should be able to set the token in the request headers:
def create_db_connection(session, db_ip, db_port, db_user, db_pass, db_name, token_from_your_csrf_url):
    headers = {
        'Referrer': login_url,
        'X-CSRFToken': token_from_your_csrf_url
    }
    ...
    response = session.post(url=url, headers=headers, json=data_out)
    ...
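For completeness, the token itself can be taken from the csrf_url call that is already in the question's __main__ block. A sketch, assuming Superset returns the token under a "result" key (worth verifying against your own response body) and reusing session, csrf_url, head, login_url, base_url and data_out from the question:

response = session.get(csrf_url, headers=head)
response.raise_for_status()
csrf_token = response.json().get("result")   # assumption: body looks like {"result": "<token>"}

headers = {
    "Authorization": head["Authorization"],
    "Referer": login_url,          # note: the real HTTP header is spelled "Referer"
    "X-CSRFToken": csrf_token
}
response = session.post(base_url + "api/v1/database/", headers=headers, json=data_out)
response.raise_for_status()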

Properly return a label in post-annotation lambda for AWS SageMaker Ground Truth custom labeling job

I'm working on a SageMaker labeling job with custom data types. For some reason, though, I'm not getting the correct label in the AWS web console. It should show the selected label, which is "Native", but instead I'm getting the <labelattributename>, which is "new-test-14".
After Ground Truth runs the post-annotation lambda, it seems to modify the metadata before returning a data object. The data object it returns doesn't contain a class-name key inside the metadata attribute, even when I hard-code the lambda to return an object that contains it.
My manifest file looks like this:
{"source-ref" : "s3://<file-name>", "text" : "Hello world"}
{"source-ref" : "s3://"<file-name>", "text" : "Hello world"}
And the worker response looks like this:
{"answers":[{"acceptanceTime":"2021-05-18T16:08:29.473Z","answerContent":{"new-test-14":{"label":"Native"}},"submissionTime":"2021-05-18T16:09:15.960Z","timeSpentInSeconds":46.487,"workerId":"private.us-east-1.ea05a03fcd679cbb","workerMetadata":{"identityData":{"identityProviderType":"Cognito","issuer":"https://cognito-idp.us-east-1.amazonaws.com/us-east-1_XPxQ9txEq","sub":"edc59ce1-e09d-4551-9e0d-a240465ea14a"}}}]}
That worker response gets processed by my post-annotation lambda, which is modeled after this AWS sample Ground Truth recipe. Here's my code:
import json
import sys
import boto3
from datetime import datetime

def lambda_handler(event, context):
    # Event received
    print("Received event: " + json.dumps(event, indent=2))
    labeling_job_arn = event["labelingJobArn"]
    label_attribute_name = event["labelAttributeName"]

    label_categories = None
    if "label_categories" in event:
        label_categories = event["labelCategories"]
        print(" Label Categories are : " + label_categories)

    payload = event["payload"]
    role_arn = event["roleArn"]

    output_config = None  # Output s3 location. You can choose to write your annotation to this location
    if "outputConfig" in event:
        output_config = event["outputConfig"]

    # If you specified a KMS key in your labeling job, you can use the key to write
    # consolidated_output to s3 location specified in outputConfig.
    # kms_key_id = None
    # if "kmsKeyId" in event:
    #     kms_key_id = event["kmsKeyId"]
    # # Create s3 client object
    # s3_client = S3Client(role_arn, kms_key_id)
    s3_client = boto3.client('s3')

    # Perform consolidation
    return do_consolidation(labeling_job_arn, payload, label_attribute_name, s3_client)

def do_consolidation(labeling_job_arn, payload, label_attribute_name, s3_client):
    """
    Core Logic for consolidation
    :param labeling_job_arn: labeling job ARN
    :param payload: payload data for consolidation
    :param label_attribute_name: identifier for labels in output JSON
    :param s3_client: S3 helper class
    :return: output JSON string
    """
    # Extract payload data
    if "s3Uri" in payload:
        s3_ref = payload["s3Uri"]
        # Split "s3://bucket/key" into bucket and key
        payload_bucket, payload_key = s3_ref.split('/', 2)[-1].split('/', 1)
        payload = json.loads(s3_client.get_object(Bucket=payload_bucket, Key=payload_key)['Body'].read())
        # print(payload)

    # Payload data contains a list of data objects.
    # Iterate over it to consolidate annotations for individual data object.
    consolidated_output = []
    success_count = 0  # Number of data objects that were successfully consolidated
    failure_count = 0  # Number of data objects that failed in consolidation

    for p in range(len(payload)):
        response = None
        try:
            dataset_object_id = payload[p]['datasetObjectId']
            log_prefix = "[{}] data object id [{}] :".format(labeling_job_arn, dataset_object_id)
            print("{} Consolidating annotations BEGIN ".format(log_prefix))

            annotations = payload[p]['annotations']
            # print("{} Received Annotations from all workers {}".format(log_prefix, annotations))

            # Iterate over annotations. Log all annotation to your CloudWatch logs
            annotationsFromAllWorkers = []
            for i in range(len(annotations)):
                worker_id = annotations[i]["workerId"]
                annotation_data = annotations[i]["annotationData"]
                annotation_content = annotation_data["content"]
                annotation_content_json = json.loads(annotation_content)
                annotation_job = annotation_content_json["new_test"]
                annotation_label = annotation_job["label"]
                consolidated_annotation = {
                    "workerId": worker_id,
                    "annotationData": {
                        "content": {
                            "annotatedResult": {
                                "instances": [{"label": annotation_label}]
                            }
                        }
                    }
                }
                annotationsFromAllWorkers.append(consolidated_annotation)

            consolidated_annotation = {"annotationsFromAllWorkers": annotationsFromAllWorkers}  # TODO : Add your consolidation logic

            # Build consolidation response object for an individual data object
            response = {
                "datasetObjectId": dataset_object_id,
                "consolidatedAnnotation": {
                    "content": {
                        label_attribute_name: consolidated_annotation,
                        label_attribute_name + "-metadata": {
                            "class-name": "Native",
                            "confidence": 0.00,
                            "human-annotated": "yes",
                            "creation-date": datetime.strftime(datetime.now(), "%Y-%m-%dT%H:%M:%S"),
                            "type": "groundtruth/custom"
                        }
                    }
                }
            }
            success_count += 1
            # print("{} Consolidating annotations END ".format(log_prefix))

            # Append individual data object response to the list of responses.
            if response is not None:
                consolidated_output.append(response)
        except Exception:
            failure_count += 1
            print(" Consolidation failed for dataobject {}".format(p))
            print(" Unexpected error: Consolidation failed." + str(sys.exc_info()[0]))

    print("Consolidation Complete. Success Count {} Failure Count {}".format(success_count, failure_count))
    print(" -- Consolidated Output -- ")
    print(consolidated_output)
    print(" ------------------------- ")
    return consolidated_output
As you can see above, the do_consolidation method returns an object hard-coded to include a class-name of "Native", and the lambda_handler method returns that same object. Here's the post-annotation function response:
[{
    "datasetObjectId": "4",
    "consolidatedAnnotation": {
        "content": {
            "new-test-14": {
                "annotationsFromAllWorkers": [{
                    "workerId": "private.us-east-1.ea05a03fcd679cbb",
                    "annotationData": {
                        "content": {
                            "annotatedResult": {
                                "instances": [{
                                    "label": "Native"
                                }]
                            }
                        }
                    }
                }]
            },
            "new-test-14-metadata": {
                "class-name": "Native",
                "confidence": 0,
                "human-annotated": "yes",
                "creation-date": "2021-05-19T07:06:06",
                "type": "groundtruth/custom"
            }
        }
    }
}]
As you can see, the post-annotation function return value has the class-name of "Native" in the metadata, so I would expect the class-name to be present in the data object metadata, but it's not. And here's a screenshot of the data object summary:
It seems like Ground Truth overwrote the metadata, and now the object doesn't contain the correct label. I think perhaps that's why my label is coming through as the label attribute name "new-test-14" instead of as the correct label "Native". Here's a screenshot of the labeling job in the AWS web console:
The web console is supposed to show the label "Native" inside the "Label" column but instead I'm getting the <labelattributename> "new-test-14" in the label column.
Here is the output.manifest file generated by Ground Truth at the end:
{
    "source-ref": "s3://<file-name>",
    "text": "Hello world",
    "new-test-14": {
        "annotationsFromAllWorkers": [{
            "workerId": "private.us-east-1.ea05a03fcd679ert",
            "annotationData": {
                "content": {
                    "annotatedResult": {
                        "label": "Native"
                    }
                }
            }
        }]
    },
    "new-test-14-metadata": {
        "type": "groundtruth/custom",
        "job-name": "new-test-14",
        "human-annotated": "yes",
        "creation-date": "2021-05-18T12:34:17.400000"
    }
}
What should I return from the Post-Annotation function? Am I missing something in my response? How do I get the proper label to appear in the AWS web console?

Bittrex REST API for Python, I want to create an order using API v3 https://api.bittrex.com/v3/orders

I need help creating orders using the Bittrex v3 REST API. I have the code below and I can't understand what is missing for it to work.
I can make other GET calls, but I cannot make this POST request.
I don't know how to deal with the passing of parameters.
Official documentation is at https://bittrex.github.io/api/v3#tag-Orders.
def NewOrder(market, amount, price):
    # print 'open sell v3', market
    market = 'HEDG-BTC'  # 'BTC-'+market
    uri = 'https://api.bittrex.com/v3/orders?'
    params = {
        'marketSymbol': 'BTC-HEDG',  # 'HEDG-BTC', #market
        'direction': 'BUY',
        'type': 'LIMIT',
        'quantity': amount,
        'limit': price,
        'timeInForce': 'POST_ONLY_GOOD_TIL_CANCELLED',
        'useAwards': True
    }
    timestamp = str(int(time.time() * 1000))
    Content = ""
    contentHash = hashlib.sha512(Content.encode()).hexdigest()
    Method = 'POST'
    uri2 = buildURI(uri, params)
    # uri2 = 'https://api.bittrex.com/v3/orders?direction=BUY&limit=0.00021&marketSymbol=HEDG-BTC&quantity=1.1&timeInForce=POST_ONLY_GOOD_TIL_CANCELLED&type=LIMIT&useAwards=True'
    # print uri2
    PreSign = timestamp + uri2 + Method + contentHash  # + subaccountId
    # print PreSign
    Signature = hmac.new(apisecret, PreSign.encode(), hashlib.sha512).hexdigest()
    headers = {
        'Api-Key': apikey,
        'Api-Timestamp': timestamp,
        'Api-Content-Hash': contentHash,
        'Api-Signature': Signature
    }
    r = requests.post(uri2, data={}, headers=headers, timeout=11)
    return json.loads(r.content)

NewOrder('HEDG', 1.1, 0.00021)
And my error message:
{u'code': u'BAD_REQUEST', u'data': {u'invalidRequestParameter': u'direction'}, u'detail': u'Refer to the data field for specific field validation failures.'}
It seems from the documentation that this body is expected by the API as JSON data:
{
    "marketSymbol": "string",
    "direction": "string",
    "type": "string",
    "quantity": "number (double)",
    "ceiling": "number (double)",
    "limit": "number (double)",
    "timeInForce": "string",
    "clientOrderId": "string (uuid)",
    "useAwards": "boolean"
}
You are setting these values as URL params; that's the issue.
You need to do this:
uri = 'https://api.bittrex.com/v3/orders'

# NOTE >>>> please check that you provide all the required fields.
payload = {
    'marketSymbol': 'BTC-HEDG',  # 'HEDG-BTC', #market
    'direction': 'BUY',
    'type': 'LIMIT',
    'quantity': amount,
    'limit': price,
    'timeInForce': 'POST_ONLY_GOOD_TIL_CANCELLED',
    'useAwards': True
}

# do the rest of the steps as you are doing
# post payload as json data with the url given in the docs
r = requests.post(uri, json=payload, headers=headers, timeout=11)
print(r.json())
If you still have issues, let us know. If it works, then please mark the answer as accepted.
Hope this helps.
I made the following modifications to the code, but it started to give an error on the content hash.
I'm assuming that some parameters are optional, so they are commented out.
def NewOrder(market, amount, price):
    market = 'BTC-' + market
    uri = 'https://api.bittrex.com/v3/orders'
    payload = {
        'marketSymbol': market,
        'direction': 'BUY',
        'type': 'LIMIT',
        'quantity': amount,
        # "ceiling": "number (double)",
        'limit': price,
        'timeInForce': 'POST_ONLY_GOOD_TIL_CANCELLED',
        # "clientOrderId": "string (uuid)",
        'useAwards': True
    }
    # ceiling (optional, must be included for ceiling orders and excluded for non-ceiling orders)
    # clientOrderId (optional) client-provided identifier for advanced order tracking
    timestamp = str(int(time.time() * 1000))
    Content = '' + json.dumps(payload, separators=(',', ':'))
    print(Content)
    contentHash = hashlib.sha512(Content.encode()).hexdigest()
    Method = 'POST'
    # uri2 = buildURI(uri, payload)  # line not used
    print(uri)
    # PreSign = timestamp + uri2 + Method + contentHash  # + subaccountId
    PreSign = timestamp + uri + Method + contentHash  # + subaccountId
    print(PreSign)
    Signature = hmac.new(apisecret, PreSign.encode(), hashlib.sha512).hexdigest()
    headers = {
        'Api-Key': apikey,
        'Api-Timestamp': timestamp,
        'Api-Content-Hash': contentHash,
        'Api-Signature': Signature
    }
    r = requests.post(uri, json=payload, headers=headers, timeout=11)
    print(r.json())
    return json.loads(r.content)

NewOrder('HEDG', 1.5, 0.00021)
{u'code': u'INVALID_CONTENT_HASH'}
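A likely cause of the INVALID_CONTENT_HASH (an assumption based on the code above, not something Bittrex states): Content is hashed after json.dumps(payload, separators=(',', ':')), but requests.post(uri, json=payload, ...) re-serializes the dict with its default separators, so the body that is sent is not the string that was hashed. Sending the already-serialized string keeps the hash, the signature, and the body in sync (this sketch reuses payload, timestamp, uri, apisecret and apikey from the function above):

Content = json.dumps(payload, separators=(',', ':'))
contentHash = hashlib.sha512(Content.encode()).hexdigest()
PreSign = timestamp + uri + 'POST' + contentHash
Signature = hmac.new(apisecret, PreSign.encode(), hashlib.sha512).hexdigest()
headers = {
    'Api-Key': apikey,
    'Api-Timestamp': timestamp,
    'Api-Content-Hash': contentHash,
    'Api-Signature': Signature,
    'Content-Type': 'application/json'
}
# Post exactly the bytes that were hashed, instead of letting requests re-encode the dict
r = requests.post(uri, data=Content, headers=headers, timeout=11)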
Bittrex API via requests package (Python):
import hmac
import hashlib
import time, requests

# url should be the full v3 endpoint you are calling, e.g. https://api.bittrex.com/v3/balances
nonce = str(int(time.time() * 1000))
content_hash = hashlib.sha512(''.encode()).hexdigest()
signature = hmac.new(
    '<SECRET_KEY>'.encode(),
    ''.join([nonce, url, 'GET', content_hash]).encode(),
    hashlib.sha512
).hexdigest()
headers = {
    'Api-Timestamp': nonce,
    'Api-Key': '<API_KEY>',
    'Content-Type': 'application/json',
    'Api-Content-Hash': content_hash,
    'Api-Signature': signature
}
result = requests.get(url=url, headers=headers)

How to retrieve multiple items from Dynamo DB using AWS lambda

How do I get multiple items from the DB? The below code throws an error, as it fetches only one item. I am retrieving the items based on the email value.
import json
import os
import boto3
import decimalencoder

dynamodb = boto3.resource('dynamodb')

def get(event, context):
    table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])

    # fetch a person from the database
    result = table.get_item(
        Key={
            'email': event['pathParameters']['email']
        }
    )

    # create a response
    response = {
        "statusCode": 200,
        "body": json.dumps(result['Item'], cls=decimalencoder.DecimalEncoder),
        "headers": {
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Credentials": "true"
        }
    }
    return response
To retrieve multiple rows from the DB, first query on the key you want the data filtered by.
Then collect the rows into a list:
import boto3
from boto3.dynamodb.conditions import Key

dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('YOUR_TABLE_NAME')  # placeholder: the table you are querying

def lambda_handler(event, context):
    # hubId is assumed to come in from the event
    # query() returns every item whose partition key matches hubId
    item = table.query(
        KeyConditionExpression=Key('hubID').eq(hubId)
    )
    if item["Count"] == 0:
        response = {"msg": "Item not exist, can't perform READ"}
    else:
        lst = []
        for row in item["Items"]:
            lst.append({
                "hubId": row["hubID"],
                "deviceState": int(row["deviceState"]),
                "deviceId": row["deviceID"],
                "deviceType": row["deviceType"],
                "intensity": int(row["intensity"])
            })
        print(lst)
        response = lst
    return response
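Applied back to the original handler, the same pattern might look like the sketch below; it assumes email is the table's partition key and reuses dynamodb, os, json and decimalencoder from the question:

from boto3.dynamodb.conditions import Key

def get(event, context):
    table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])

    # query() returns every item with the given partition key, unlike get_item()
    result = table.query(
        KeyConditionExpression=Key('email').eq(event['pathParameters']['email'])
    )

    return {
        "statusCode": 200,
        "body": json.dumps(result['Items'], cls=decimalencoder.DecimalEncoder),
        "headers": {
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Credentials": "true"
        }
    }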

Google Maps Geolocation API returns the latitude and longitude of my server

During my testing, the Geolocation API used to return the proper result.
But in the deployment on AWS it returns the location of Virginia, where my EC2 instances are located (i.e. an IP-based result).
"cellId": 27193,
"locationAreaCode": 17007,
"mobileCountryCode": 404,
"mobileNetworkCode": 20
Result:
"lat": 19.2019619,
"lng": 73.1063466
But on AWs it returns:
"lat": 39.043756699999996
"lng": -77.4874416
params = {
    "key": SERVER_KEY
}
data = json.dumps({
    "cellTowers": [
        {
            "cellId": cid,
            "locationAreaCode": lac,
            "mobileCountryCode": mcc,
            "mobileNetworkCode": mnc
        }
    ]
})
log.info(params)
log.info(data)

request = requests.post(HOST_URL, params=params, data=data)
# print(request.text)
# print(request.status_code)

if request.status_code == 200:
    response = json.loads(request.text)
    latitude = response["location"]["lat"]
    longitude = response["location"]["lng"]
    return {
        "latitude": latitude,
        "longitude": longitude
    }
else:
    error_data = request.text
    log.error("Error Occurred in Finding Cell Tower Location")
    log.error(request.text)
    log.error(json.dumps(params))
    send_error_notification(error_data)
    return None
The answer is simple, and how silly of me to assume that not adding the header would not affect anything.
Just by adding the Content-Type: application/json header (and setting "considerIp": False so the API does not fall back to the caller's IP address), everything works like a charm.
params = {
    "key": SERVER_KEY
}
data = json.dumps({
    "considerIp": False,
    "cellTowers": [
        {
            "cellId": cid,
            "locationAreaCode": lac,
            "mobileCountryCode": mcc,
            "mobileNetworkCode": mnc
        }
    ]
})
headers = {
    'content-type': "application/json",
    'cache-control': "no-cache"
}
log.info(params)
log.info(data)

request = requests.post(HOST_URL, params=params, data=data, headers=headers)