How to pull an array from a POST request within Django

I am trying to extract an array from a POST request using Django.
def settlements_orders(request):
    database = "####"
    if request.method == 'POST':
        first_date = request.POST.get('first_date', False)
        last_date = request.POST.get('last_date', False)
        brands = request.POST.getlist('brands[]')
    else:
        first_date = '2020-01-01'
        last_date = '2020-05-13'
        brands = ['ICON-APPAREL']
    settlements_orders = SettlementOrders.objects.using(database) \
        .filter(posted_date__date__range=[first_date, last_date]) \
        .filter(sku__brand__in=brands) \
        .select_related('sku').select_related('posted_date') \
        .values('sku__company', 'sku__brand', 'sku__department', 'sku__category', 'sku__sub_category',
                'sku__parent_asin', 'sku__asin', 'sku__vendor', 'sku__color', 'sku__size', 'sku__case_pack',
                'posted_date__year', 'posted_date__month') \
        .annotate(count=Count('order_id'),
                  qty_sold=Sum('quantity_sold'),
                  qty_returned=Sum('quantity_returned'),
                  order_revenue=Sum('order_revenue'),
                  refund_revenue=Sum('refund_revenue'),
                  promotion=Sum('promotion'),
                  giftwrap_collected=Sum('giftwrap_collected'),
                  giftwrap_paid=Sum('giftwrap_paid'),
                  shipping_fees_collected=Sum('shipping_fees_collected'),
                  shipping_fees_paid=Sum('shipping_fees_paid'),
                  sales_tax_collected=Sum('sales_tax_collected'),
                  sales_tax_paid=Sum('sales_tax_paid'),
                  shipping_tax_collected=Sum('shipping_tax_collected'),
                  shipping_tax_paid=Sum('shipping_tax_paid'),
                  fulfillment_fees=Sum('fulfillment_fees'),
                  comission_fees=Sum('comission_fees'),
                  closing_fees=Sum('closing_fees'),
                  other_fees=Sum('other_fees'),
                  restock_fees=Sum('restock_fees'),
                  goodwill_paid=Sum('goodwill_paid'),
                  goodwill_reimbursed=Sum('goodwill_reimbursed'),
                  return_fees=Sum('return_fees'),
                  reimbursements=Sum('reimbursements'),
                  margin=Sum('margin'))
    return JsonResponse(settlements_orders[::1], safe=False)
I am using the array to filter data. When I make a GET request, data is returned, but on a POST request I get a 500 response. Any help would be appreciated.
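For reference, here is a minimal sketch of pulling a list out of a POST body that handles both form-encoded and JSON payloads. One plausible cause of the 500 (an assumption, since no traceback is shown) is that first_date and last_date default to False when the keys are missing, and filtering a date range on False raises; returning a 400 on bad input makes that visible:

import json

from django.http import HttpResponseBadRequest, JsonResponse

def settlements_orders(request):
    if request.method == 'POST':
        if request.content_type == 'application/json':
            # JSON body, e.g. {"brands": ["ICON-APPAREL"], "first_date": "2020-01-01", ...}
            payload = json.loads(request.body)
            brands = payload.get('brands', [])
            first_date = payload.get('first_date')
            last_date = payload.get('last_date')
        else:
            # form-encoded body; jQuery serializes arrays as "brands[]"
            brands = request.POST.getlist('brands[]') or request.POST.getlist('brands')
            first_date = request.POST.get('first_date')
            last_date = request.POST.get('last_date')
        if not (first_date and last_date and brands):
            # fail loudly instead of letting the ORM raise a 500 on bad input
            return HttpResponseBadRequest('first_date, last_date and brands are required')
    else:
        first_date, last_date = '2020-01-01', '2020-05-13'
        brands = ['ICON-APPAREL']
    # ... run the aggregation query from the question here ...
    return JsonResponse({'brands': brands, 'range': [first_date, last_date]})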

Related

Assign database value to Django variable

Looking to do something like this in Django:
$price = mysql_query("SELECT price FROM products WHERE product = '$product'");
$result = mysql_fetch_array($price);
if ($result['price'] == 1) {
    // do something
}
My thought was something like the below, but it seems to be way off:
item = Order.objects.get(quote_number=quotex)
price = item.quote_status

headers = {
    # Already added when you pass json= but not when you pass data=
    # 'Content-Type': 'application/json',
    'X-API-KEY': '',
}

messages.success(request, price)
if price == 2:
The price == 2 branch never executes, even though messages.success shows 2 as the value of the price variable.
Please note this is not being used to output to a template, as I already know how to do that.
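For comparison, the PHP fragment maps to a single attribute lookup in the ORM. A minimal sketch (assuming quote_status is the value being compared, per the question's code; the Order import location is an assumption):

from django.contrib import messages

from .models import Order  # assumption: where the model lives

def check_price(request, quotex):
    item = Order.objects.get(quote_number=quotex)
    price = item.quote_status
    messages.success(request, price)
    # if quote_status is a CharField, price is the string "2", and "2" == 2
    # is False in Python; cast before comparing
    if int(price) == 2:
        ...  # do something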

Django not rendering template after POST method

I'm trying to render the Django template after uploading a file (POST method). The issue: I added some print() calls inside the POST branch and everything works fine (VS Code terminal), but the template (HTML) passed to render() is never displayed.
views.py:
def index(request):
    if 'GET' == request.method:
        print("It didn't work it!")
        print(request.FILES['file'])
        return render(request, 'auditoria_app/index.html')
    else:
        print('It worked it!')
        excel_file = request.FILES["file"]
        wb = openpyxl.load_workbook(excel_file, data_only=True)
        wb.security.workbook_password = 'Rnip*2020'
        inputsCombo = wb['Inputs COMBO']
        inputsDNCombo = wb['Inputs DN COMBO']
        outputCombo = wb['OutPut COMBO']
        faixas = wb['Faixas']
        wb2 = openpyxl.load_workbook('media/sigma.xlsx', data_only=True)
        sigma = wb2['sigma']
        sic = wb2['Performance']
        # wb4 = openpyxl.load_workbook('media/ultimoContrato.xlsm', data_only=True)
        # ultimoContrato = wb4['Base']
        numeroIbms = inputsCombo['F5'].value
        ibms = []
        output = outputResults(numeroIbms, outputCombo)
        for i in range(8, 8 + numeroIbms):
            ibm = Ibm(i, inputsCombo, inputsDNCombo, outputCombo, faixas, sigma)
            ibms.append(ibm)
            print(ibm.numeroIbm)
            # sigmaReport(sigma, ibm)
        return render(request, 'auditoria_app/index.html',
                      {'ibms': ibms,
                       'numeroIbms': numeroIbms,
                       'output': output,
                       })
Message displayed on VSCode terminal:
It worked it!
Outside: SAE_R360_pagnussat.xlsm
1038108
1049885
[04/Sep/2021 21:18:36] "POST /auditoria/ HTTP/1.1" 200 5132
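The log shows the POST returning 200 with a rendered body, so one plausible explanation (an assumption, since the template and any client-side script are not shown) is that the file is uploaded via JavaScript (fetch/AJAX) and the HTML that render() returns is simply discarded by the client. In that case, either submit the form natively or follow the Post/Redirect/Get pattern, roughly:

from django.shortcuts import redirect, render

def index(request):
    if request.method == 'POST':
        # ... process request.FILES['file'] exactly as in the question ...
        # then redirect so the browser issues a fresh GET and renders the page
        return redirect('index')  # assumption: the URL pattern is named 'index'
    return render(request, 'auditoria_app/index.html')

Results that must survive the redirect (ibms, output, ...) can be stashed in the session before redirecting.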

Cross-account CloudTrail log transfer through CloudWatch and Kinesis Data Streams

I am using CloudWatch subscriptions to send CloudTrail logs from one account to another. The account receiving the logs has a Kinesis data stream that receives the logs from the CloudWatch subscription and invokes the standard Lambda function provided by AWS to parse and store the logs in an S3 bucket of the log-receiver account.
The log files written to the S3 bucket look like this:
{"eventVersion":"1.08","userIdentity":{"type":"AssumedRole","principalId":"AA:i-096379450e69ed082","arn":"arn:aws:sts::34502sdsdsd:assumed-role/RDSAccessRole/i-096379450e69ed082","accountId":"34502sdsdsd","accessKeyId":"ASIAVAVKXAXXXXXXXC","sessionContext":{"sessionIssuer":{"type":"Role","principalId":"AROAVAVKXAKDDDDD","arn":"arn:aws:iam::3450291sdsdsd:role/RDSAccessRole","accountId":"345029asasas","userName":"RDSAccessRole"},"webIdFederationData":{},"attributes":{"mfaAuthenticated":"false","creationDate":"2021-04-27T04:38:52Z"},"ec2RoleDelivery":"2.0"}},"eventTime":"2021-04-27T07:24:20Z","eventSource":"ssm.amazonaws.com","eventName":"ListInstanceAssociations","awsRegion":"us-east-1","sourceIPAddress":"188.208.227.188","userAgent":"aws-sdk-go/1.25.41 (go1.13.15; linux; amd64) amazon-ssm-agent/","requestParameters":{"instanceId":"i-096379450e69ed082","maxResults":20},"responseElements":null,"requestID":"a5c63b9d-aaed-4a3c-9b7d-a4f7c6b774ab","eventID":"70de51df-c6df-4a57-8c1e-0ffdeb5ac29d","readOnly":true,"resources":[{"accountId":"34502914asasas","ARN":"arn:aws:ec2:us-east-1:3450291asasas:instance/i-096379450e69ed082"}],"eventType":"AwsApiCall","managementEvent":true,"eventCategory":"Management","recipientAccountId":"345029149342"}
{"eventVersion":"1.08","userIdentity":{"type":"AssumedRole","principalId":"AROAVAVKXAKPKZ25XXXX:AmazonMWAA-airflow","arn":"arn:aws:sts::3450291asasas:assumed-role/dev-1xdcfd/AmazonMWAA-airflow","accountId":"34502asasas","accessKeyId":"ASIAVAVKXAXXXXXXX","sessionContext":{"sessionIssuer":{"type":"Role","principalId":"AROAVAVKXAKPKZXXXXX","arn":"arn:aws:iam::345029asasas:role/service-role/AmazonMWAA-dlp-dev-1xdcfd","accountId":"3450291asasas","userName":"dlp-dev-1xdcfd"},"webIdFederationData":{},"attributes":{"mfaAuthenticated":"false","creationDate":"2021-04-27T07:04:08Z"}},"invokedBy":"airflow.amazonaws.com"},"eventTime":"2021-04-27T07:23:46Z","eventSource":"logs.amazonaws.com","eventName":"CreateLogStream","awsRegion":"us-east-1","sourceIPAddress":"airflow.amazonaws.com","userAgent":"airflow.amazonaws.com","errorCode":"ResourceAlreadyExistsException","errorMessage":"The specified log stream already exists","requestParameters":{"logStreamName":"scheduler.py.log","logGroupName":"dlp-dev-DAGProcessing"},"responseElements":null,"requestID":"40b48ef9-fc4b-4d1a-8fd1-4f2584aff1e9","eventID":"ef608d43-4765-4a3a-9c92-14ef35104697","readOnly":false,"eventType":"AwsApiCall","apiVersion":"20140328","managementEvent":true,"eventCategory":"Management","recipientAccountId":"3450291asasas"}
The problem with these log lines is that Athena cannot parse them, so I am unable to query the logs using Athena.
I tried modifying the blueprint Lambda function to save the log file as a standard JSON result, which would make it easy for Athena to parse the files.
For example:
{'Records': ['{"eventVersion":"1.08","userIdentity":{"type":"AssumedRole","principalId":"AROAVAVKXAKPBRW2S3TAF:i-096379450e69ed082","arn":"arn:aws:sts::345029149342:assumed-role/RightslineRDSAccessRole/i-096379450e69ed082","accountId":"345029149342","accessKeyId":"ASIAVAVKXAKPBL653UOC","sessionContext":{"sessionIssuer":{"type":"Role","principalId":"AROAVAVKXAKPXXXXXXX","arn":"arn:aws:iam::34502asasas:role/RDSAccessRole","accountId":"345029asasas","userName":"RDSAccessRole"},"webIdFederationData":{},"attributes":{"mfaAuthenticated":"false","creationDate":"2021-04-27T04:38:52Z"},"ec2RoleDelivery":"2.0"}},"eventTime":"2021-04-27T07:24:20Z","eventSource":"ssm.amazonaws.com","eventName":"ListInstanceAssociations","awsRegion":"us-east-1","sourceIPAddress":"188.208.227.188","userAgent":"aws-sdk-go/1.25.41 (go1.13.15; linux; amd64) amazon-ssm-agent/","requestParameters":{"instanceId":"i-096379450e69ed082","maxResults":20},"responseElements":null,"requestID":"a5c63b9d-aaed-4a3c-9b7d-a4f7c6b774ab","eventID":"70de51df-c6df-4a57-8c1e-0ffdeb5ac29d","readOnly":true,"resources":[{"accountId":"3450291asasas","ARN":"arn:aws:ec2:us-east-1:34502asasas:instance/i-096379450e69ed082"}],"eventType":"AwsApiCall","managementEvent":true,"eventCategory":"Management","recipientAccountId":"345029asasas"}]}
The modified code for the blueprint Lambda function looks like this:
import base64
import json
import gzip
from io import BytesIO
import boto3

def transformLogEvent(log_event):
    return log_event['message'] + '\n'

def processRecords(records):
    for r in records:
        data = base64.b64decode(r['data'])
        striodata = BytesIO(data)
        with gzip.GzipFile(fileobj=striodata, mode='r') as f:
            data = json.loads(f.read())
        recId = r['recordId']
        if data['messageType'] == 'CONTROL_MESSAGE':
            yield {
                'result': 'Dropped',
                'recordId': recId
            }
        elif data['messageType'] == 'DATA_MESSAGE':
            result = {}
            result["Records"] = {}
            events = []
            for e in data['logEvents']:
                events.append(e["message"])
            result["Records"] = events
            print(result)
            if len(result) <= 6000000:
                yield {
                    'data': result,
                    'result': 'Ok',
                    'recordId': recId
                }
            else:
                yield {
                    'result': 'ProcessingFailed',
                    'recordId': recId
                }
        else:
            yield {
                'result': 'ProcessingFailed',
                'recordId': recId
            }
def putRecordsToFirehoseStream(streamName, records, client, attemptsMade, maxAttempts):
    failedRecords = []
    codes = []
    errMsg = ''
    # if put_record_batch throws for whatever reason, response['xx'] will error out, adding a check for a valid
    # response will prevent this
    response = None
    try:
        response = client.put_record_batch(DeliveryStreamName=streamName, Records=records)
    except Exception as e:
        failedRecords = records
        errMsg = str(e)
    # if there are no failedRecords (put_record_batch succeeded), iterate over the response to gather results
    if not failedRecords and response and response['FailedPutCount'] > 0:
        for idx, res in enumerate(response['RequestResponses']):
            # (if the result does not have a key 'ErrorCode' OR if it does and is empty) => we do not need to re-ingest
            if 'ErrorCode' not in res or not res['ErrorCode']:
                continue
            codes.append(res['ErrorCode'])
            failedRecords.append(records[idx])
        errMsg = 'Individual error codes: ' + ','.join(codes)
    if len(failedRecords) > 0:
        if attemptsMade + 1 < maxAttempts:
            print('Some records failed while calling PutRecordBatch to Firehose stream, retrying. %s' % (errMsg))
            putRecordsToFirehoseStream(streamName, failedRecords, client, attemptsMade + 1, maxAttempts)
        else:
            raise RuntimeError('Could not put records after %s attempts. %s' % (str(maxAttempts), errMsg))
def putRecordsToKinesisStream(streamName, records, client, attemptsMade, maxAttempts):
    failedRecords = []
    codes = []
    errMsg = ''
    # if put_records throws for whatever reason, response['xx'] will error out, adding a check for a valid
    # response will prevent this
    response = None
    try:
        response = client.put_records(StreamName=streamName, Records=records)
    except Exception as e:
        failedRecords = records
        errMsg = str(e)
    # if there are no failedRecords (put_record_batch succeeded), iterate over the response to gather results
    if not failedRecords and response and response['FailedRecordCount'] > 0:
        for idx, res in enumerate(response['Records']):
            # (if the result does not have a key 'ErrorCode' OR if it does and is empty) => we do not need to re-ingest
            if 'ErrorCode' not in res or not res['ErrorCode']:
                continue
            codes.append(res['ErrorCode'])
            failedRecords.append(records[idx])
        errMsg = 'Individual error codes: ' + ','.join(codes)
    if len(failedRecords) > 0:
        if attemptsMade + 1 < maxAttempts:
            print('Some records failed while calling PutRecords to Kinesis stream, retrying. %s' % (errMsg))
            putRecordsToKinesisStream(streamName, failedRecords, client, attemptsMade + 1, maxAttempts)
        else:
            raise RuntimeError('Could not put records after %s attempts. %s' % (str(maxAttempts), errMsg))
def createReingestionRecord(isSas, originalRecord):
    if isSas:
        return {'data': base64.b64decode(originalRecord['data']), 'partitionKey': originalRecord['kinesisRecordMetadata']['partitionKey']}
    else:
        return {'data': base64.b64decode(originalRecord['data'])}

def getReingestionRecord(isSas, reIngestionRecord):
    if isSas:
        return {'Data': reIngestionRecord['data'], 'PartitionKey': reIngestionRecord['partitionKey']}
    else:
        return {'Data': reIngestionRecord['data']}
def lambda_handler(event, context):
    print(event)
    isSas = 'sourceKinesisStreamArn' in event
    streamARN = event['sourceKinesisStreamArn'] if isSas else event['deliveryStreamArn']
    region = streamARN.split(':')[3]
    streamName = streamARN.split('/')[1]
    records = list(processRecords(event['records']))
    projectedSize = 0
    dataByRecordId = {rec['recordId']: createReingestionRecord(isSas, rec) for rec in event['records']}
    putRecordBatches = []
    recordsToReingest = []
    totalRecordsToBeReingested = 0
    for idx, rec in enumerate(records):
        if rec['result'] != 'Ok':
            continue
        projectedSize += len(rec['data']) + len(rec['recordId'])
        # 6000000 instead of 6291456 to leave ample headroom for the stuff we didn't account for
        if projectedSize > 6000000:
            totalRecordsToBeReingested += 1
            recordsToReingest.append(
                getReingestionRecord(isSas, dataByRecordId[rec['recordId']])
            )
            records[idx]['result'] = 'Dropped'
            del(records[idx]['data'])
        # split out the record batches into multiple groups, 500 records at max per group
        if len(recordsToReingest) == 500:
            putRecordBatches.append(recordsToReingest)
            recordsToReingest = []
    if len(recordsToReingest) > 0:
        # add the last batch
        putRecordBatches.append(recordsToReingest)
    # iterate and call putRecordBatch for each group
    recordsReingestedSoFar = 0
    if len(putRecordBatches) > 0:
        client = boto3.client('kinesis', region_name=region) if isSas else boto3.client('firehose', region_name=region)
        for recordBatch in putRecordBatches:
            if isSas:
                putRecordsToKinesisStream(streamName, recordBatch, client, attemptsMade=0, maxAttempts=20)
            else:
                putRecordsToFirehoseStream(streamName, recordBatch, client, attemptsMade=0, maxAttempts=20)
            recordsReingestedSoFar += len(recordBatch)
            print('Reingested %d/%d records out of %d' % (recordsReingestedSoFar, totalRecordsToBeReingested, len(event['records'])))
    else:
        print('No records to be reingested')
    return {"records": records}
My end goal is to store the result on S3 as JSON so that it can be queried easily with Athena.
The line where the transformation happens is:
elif data['messageType'] == 'DATA_MESSAGE':
Any help with this would be greatly appreciated.
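Judging by the sample output, the transformed record ends up in S3 as the Python repr of a dict (single quotes), which is not valid JSON; Athena's JSON SerDe also wants one object per line. A minimal sketch of an alternative DATA_MESSAGE branch that emits newline-delimited JSON and base64-encodes it, which is the shape Firehose expects back from a transformation Lambda (variable names follow the question):

import base64

def transform_data_message(data, rec_id):
    # each logEvents[].message is already a JSON-encoded CloudTrail event,
    # so writing one per line yields NDJSON that Athena can parse
    payload = '\n'.join(event['message'] for event in data['logEvents']) + '\n'
    encoded = base64.b64encode(payload.encode('utf-8')).decode('utf-8')
    if len(encoded) <= 6000000:  # same headroom guard as the blueprint
        return {'data': encoded, 'result': 'Ok', 'recordId': rec_id}
    return {'result': 'ProcessingFailed', 'recordId': rec_id}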

Scheduling tasks with dynamic arguments to run periodically with Celery in Django

I have a task that takes dynamic arguments and that I want to run periodically. How do I pass dynamic values as task arguments when the task is called in django celery beat?
Here is the task I want to run periodically:
@task(bind=True)
def generate_export(export_type, xform, export_id=None, options=None):
    """
    Create appropriate export object given the export type.

    param: export_type
    param: xform
    params: export_id: ID of export object associated with the request
    param: options: additional parameters required for the lookup.
        binary_select_multiples: boolean flag
        end: end offset
        ext: export extension type
        dataview_pk: dataview pk
        group_delimiter: "/" or "."
        query: filter_query for custom queries
        remove_group_name: boolean flag
        split_select_multiples: boolean flag
        index_tag: ('[', ']') or ('_', '_')
        show_choice_labels: boolean flag
        language: language labels as in the XLSForm/XForm
    """
    username = xform.user.username
    id_string = xform.id_string
    end = options.get("end")
    extension = options.get("extension", export_type)
    filter_query = options.get("query")
    remove_group_name = options.get("remove_group_name", False)
    start = options.get("start")
    export_type_func_map = {
        Export.XLS_EXPORT: 'to_xls_export',
        Export.CSV_EXPORT: 'to_flat_csv_export',
        Export.DHIS2CSV_EXPORT: 'to_dhis2csv_export',
        Export.CSV_ZIP_EXPORT: 'to_zipped_csv',
        Export.SAV_ZIP_EXPORT: 'to_zipped_sav',
        Export.GOOGLE_SHEETS_EXPORT: 'to_google_sheets',
    }
    if xform is None:
        xform = XForm.objects.get(
            user__username__iexact=username, id_string__iexact=id_string)
    dataview = None
    if options.get("dataview_pk"):
        dataview = DataView.objects.get(pk=options.get("dataview_pk"))
        records = dataview.query_data(dataview, all_data=True,
                                      filter_query=filter_query)
        total_records = dataview.query_data(dataview,
                                            count=True)[0].get('count')
    else:
        records = query_data(xform, query=filter_query, start=start, end=end)
        if filter_query:
            total_records = query_data(xform, query=filter_query, start=start,
                                       end=end, count=True)[0].get('count')
        else:
            total_records = xform.num_of_submissions
    if isinstance(records, QuerySet):
        records = records.iterator()
    export_builder = ExportBuilder()
    export_builder.TRUNCATE_GROUP_TITLE = True \
        if export_type == Export.SAV_ZIP_EXPORT else remove_group_name
    export_builder.GROUP_DELIMITER = options.get(
        "group_delimiter", DEFAULT_GROUP_DELIMITER
    )
    export_builder.SPLIT_SELECT_MULTIPLES = options.get(
        "split_select_multiples", True
    )
    export_builder.BINARY_SELECT_MULTIPLES = options.get(
        "binary_select_multiples", False
    )
    export_builder.INCLUDE_LABELS = options.get('include_labels', False)
    export_builder.INCLUDE_LABELS_ONLY = options.get(
        'include_labels_only', False
    )
    export_builder.INCLUDE_HXL = options.get('include_hxl', False)
    export_builder.INCLUDE_IMAGES \
        = options.get("include_images", settings.EXPORT_WITH_IMAGE_DEFAULT)
    export_builder.VALUE_SELECT_MULTIPLES = options.get(
        'value_select_multiples', False)
    export_builder.REPEAT_INDEX_TAGS = options.get(
        "repeat_index_tags", DEFAULT_INDEX_TAGS
    )
    export_builder.SHOW_CHOICE_LABELS = options.get('show_choice_labels',
                                                    False)
    export_builder.language = options.get('language')
    # 'win_excel_utf8' is only relevant for CSV exports
    if 'win_excel_utf8' in options and export_type != Export.CSV_EXPORT:
        del options['win_excel_utf8']
    export_builder.set_survey(xform.survey, xform)
    # change the dhis2csv exports to standard csv format
    if extension == 'dhis2csv':
        extension = 'csv'
    temp_file = NamedTemporaryFile(suffix=("." + extension))
    columns_with_hxl = export_builder.INCLUDE_HXL and get_columns_with_hxl(
        xform.survey_elements)
    # get the export function by export type
    func = getattr(export_builder, export_type_func_map[export_type])
    try:
        func.__call__(
            temp_file.name, records, username, id_string, filter_query,
            start=start, end=end, dataview=dataview, xform=xform,
            options=options, columns_with_hxl=columns_with_hxl,
            total_records=total_records
        )
    except NoRecordsFoundError:
        pass
    except SPSSIOError as e:
        export = get_or_create_export(export_id, xform, export_type, options)
        export.error_message = str(e)
        export.internal_status = Export.FAILED
        export.save()
        report_exception("SAV Export Failure", e, sys.exc_info())
        return export
    # generate filename
    basename = "%s_%s" % (
        id_string, datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f"))
    if remove_group_name:
        # add 'remove group name' flag to filename
        basename = "{}-{}".format(basename, GROUPNAME_REMOVED_FLAG)
    if dataview:
        basename = "{}-{}".format(basename, DATAVIEW_EXPORT)
    filename = basename + "." + extension
    # check filename is unique
    while not Export.is_filename_unique(xform, filename):
        filename = increment_index_in_filename(filename)
    file_path = os.path.join(
        username,
        'exports',
        id_string,
        export_type,
        filename)
    # seek to the beginning as required by storage classes
    temp_file.seek(0)
    export_filename = default_storage.save(file_path,
                                           File(temp_file, file_path))
    temp_file.close()
    dir_name, basename = os.path.split(export_filename)
    # get or create export object
    export = get_or_create_export(export_id, xform, export_type, options)
    export.filedir = dir_name
    export.filename = basename
    export.internal_status = Export.SUCCESSFUL
    # do not persist exports that have a filter
    # Get URL of the exported sheet.
    if export_type == Export.GOOGLE_SHEETS_EXPORT:
        export.export_url = export_builder.url
    # if we should create a new export is true, we should not save it
    if start is None and end is None:
        export.save()
    return export
And this is where I call the task in the celery beat schedule:
CELERY_BEAT_SCHEDULE = {
    'download_csv': {
        'task': 'onadata.libs.utils.export_tools.generate_export',
        # There are 4 ways we can handle time, read further
        'schedule': crontab(minute='*'),
        # If you're using any arguments
        'args': ()
    }
}
How do I pass parameters into the arguments for the task?
There is no way to pass arguments dynamically in Celery Beat, and I think your function is not suitable for a periodic task.
Instead of handing values directly to the generate_export function, change it to fetch the required items inside the function, or switch to a plain asynchronous call.
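A minimal sketch of that approach: schedule a zero-argument wrapper task and resolve the dynamic values inside it at run time (the auto_export flag and the CSV choice are assumptions for illustration, not from the question):

from celery import shared_task

@shared_task
def scheduled_exports():
    # Beat schedules this with empty args; the "dynamic" values are
    # looked up here, each time the task actually runs
    for xform in XForm.objects.filter(auto_export=True):  # assumption: such a flag exists
        options = {"extension": "csv"}
        # pass the pk rather than the model instance so Celery's JSON
        # serializer copes; generate_export would then look the XForm up
        generate_export.delay(Export.CSV_EXPORT, xform.pk, options=options)

CELERY_BEAT_SCHEDULE then points 'task' at scheduled_exports with 'args': ().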
I faced a similar problem. The args field in beat_schedule is fixed at startup and does not change afterward.
But there is a hackish way to pass different arguments to your task.
Use the before_task_publish signal to add custom data to the headers.
from celery.signals import before_task_publish

@before_task_publish.connect
def before_publish(sender=None, headers=None, body=None, **kwargs):
    if sender == "tasks.generate_export":
        headers["custom_args"] = {
            "export_type": "some_val",
            "xform": "some_val",
            "export_id": get_export_id(),
            "options": options_dict,
        }
By default, Celery uses the JSON serializer, so make sure the data you add to the headers is JSON serializable. Alternatively, you can use pickle to serialize the data, but that brings security concerns with it.
Now you can access these headers in a bound task.
@task(bind=True)
def generate_export(self):
    args = self.request.get("custom_args", None)
    # do something with args

Django: How to add an extra form to a formset after it has been constructed?

This is roughly what I'm trying to do:
def post(request):
    VehicleFormSet = formset_factory(StaffVehicleForm)
    if request.method == 'POST':
        vehicle_formset = VehicleFormSet(request.POST)
        if 'add_vehicle' in request.POST:
            if vehicle_formset.is_valid():
                form_count = vehicle_formset.total_form_count()
                vehicle_formset.forms.append(vehicle_formset._construct_form(form_count))
Basically, if a user clicks the "Add" button and their entry is valid, I want to add another blank form to the formset, and hide the previous one.
The problem with the code above is that I can't figure out how to increase total_form_count(). The way I have it now, it will work once, and then if you press it again, nothing will happen, presumably because form_count is the same. I also don't like calling _construct_form and relying on the internals.
from django.forms.formsets import BaseFormSet, TOTAL_FORM_COUNT, INITIAL_FORM_COUNT

class RequiredFormSet(BaseFormSet):
    def add_form(self, **kwargs):
        # add the form
        tfc = self.total_form_count()
        self.forms.append(self._construct_form(tfc, **kwargs))
        self.forms[tfc].is_bound = False
        # make data mutable
        self.data = self.data.copy()
        # increase hidden form counts
        total_count_name = '%s-%s' % (self.management_form.prefix, TOTAL_FORM_COUNT)
        initial_count_name = '%s-%s' % (self.management_form.prefix, INITIAL_FORM_COUNT)
        self.data[total_count_name] = self.management_form.cleaned_data[TOTAL_FORM_COUNT] + 1
        self.data[initial_count_name] = self.management_form.cleaned_data[INITIAL_FORM_COUNT] + 1

    def add_fields(self, form, index):
        super(RequiredFormSet, self).add_fields(form, index)
        form.empty_permitted = False
That will do it. Only took 7 hours to figure out. And I still don't know why I need .is_bound = False to make the initial values not screw up.
I do this using JavaScript. Since the formset renders three management fields:
<input type="hidden" id="id_TOTAL_FORMS" value="1" name="TOTAL_FORMS">
<input type="hidden" id="id_INITIAL_FORMS" value="1" name="INITIAL_FORMS">.
<input type="hidden" id="id_MAX_NUM_FORMS" name="MAX_NUM_FORMS">
you can use JavaScript to increment the id_TOTAL_FORMS value and just add in the extra fields. So I'd create my formset like this:
VehicleFormSet = modelformset_factory(StaffVehicleForm, extra=0, max_num=None)
The tricky thing is to create the extra form fields in JavaScript. I usually use AJAX to fetch a new row from a custom view.
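For what it's worth, a sketch of such a custom view: Django formsets expose empty_form, a blank form whose field names contain __prefix__, which can be swapped for the next index before the client bumps id_TOTAL_FORMS (the view name, the import location, and the index query parameter are assumptions):

from django.forms import formset_factory
from django.http import HttpResponse

from .forms import StaffVehicleForm  # assumption: where the form lives

def vehicle_form_row(request):
    index = request.GET.get('index', '0')  # assumption: JS passes the next form index
    VehicleFormSet = formset_factory(StaffVehicleForm)
    # empty_form renders field names like form-__prefix__-field
    html = VehicleFormSet().empty_form.as_p().replace('__prefix__', index)
    return HttpResponse(html)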
For posterity, here is another way which works without JS (or alongside it) and which does not require intimate knowledge of formset methods. Instead, you can inspect the POST data and adjust it as if JS had done some work client-side. The following makes sure that there is always (at least) one empty form at the end of the formset:
def hsview(request):
    HS_formset = formset_factory(HSTestForm, extra=3)
    prefix = 'XYZZY'
    testinpost, empty = 'key', ''  # field in the form and its default/empty value
    extra = 3
    # I prefer to do the short init of unbound forms first, so I invert the usual test ...
    if request.method != 'POST':
        formset = HS_formset(prefix=prefix)
    else:
        # process POSTed forms data.
        # pull all relevant things out of POST data, because POST itself is not mutable
        # (it doesn't matter if prefix allows in extraneous items)
        data = {k: v for k, v in request.POST.items() if k.startswith(prefix)}
        # if there are no spare empty forms, tell it we want another form, in place of or extra to client-side JS
        # don't want to crash if unvalidated POST data is nbg so catch all ...
        try:
            n = int(data[prefix + '-TOTAL_FORMS'])
            test = '{}-{}-{}'.format(prefix, n - 1, testinpost)
            # print(test)
            test = data.get(test, empty)
        except Exception:
            test = 'bleagh'
            # log the error if it matters enough ...
        if test != empty:
            data[prefix + '-TOTAL_FORMS'] = n + 1
        # now the usual formset processing ...
        formset = HS_formset(data, prefix=prefix)
        # other_form = OtherForm(request.POST)
        if formset.is_valid():
            ...
I use a RegEx in my Vue.js methods:
addForm: function () {
    this.count++
    let form_count = this.count
    form_count++
    let formID = 'id_form-' + this.count
    incremented_form = this.vue_form.replace(/form-\d/g, 'form-' + this.count)
    this.formList.push(incremented_form)
    this.$nextTick(() => {
        let total_forms = document.getElementsByName('form-TOTAL_FORMS').forEach(function (ele, idx) {
            ele.value = form_count
        })
    })
},
delForm: function () {
    if (this.count != 0) {
        this.count--
        let form_count = this.count
        form_count++
        let formID = 'id_form-' + this.count
        this.formList.pop()
        this.$nextTick(() => {
            let total_forms = document.getElementsByName('form-TOTAL_FORMS').forEach(function (ele, idx) {
                ele.value = form_count
            })
        })
    }
    else return
},