I'm using OpenSSL in my C++ program, and I need to link against crypto and ssl. With gcc, for example, I would just pass:
-lcrypto -lssl
I am adding this dependency in Network Simulator 3 (ns-3), but I don't know how to do it in WAF. How should I add these libraries as a dependency?
First you need to check in configure that the libraries are available; then you can use them in build:
def configure(cnf):
    # other parameters omitted for brevity
    cnf.check(lib=["crypto", "ssl"])

def build(bld):
    # other parameters omitted for brevity
    bld(use=["crypto", "ssl"])
You can also use the uselib_store parameter (which takes a string, not a list) if you don't want to repeat the library names:
cnf.check(lib=["crypto", "ssl"], uselib_store="libs")
bld(use=["libs"])
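For context, a complete minimal wscript combining both steps might look like the sketch below; the source file and program name are placeholders, not from the question.

# Minimal wscript sketch; 'main.cpp' and 'my-app' are placeholder names.

def options(opt):
    opt.load('compiler_cxx')

def configure(cnf):
    cnf.load('compiler_cxx')
    # Aborts configure if OpenSSL's crypto/ssl libraries are not found
    cnf.check(lib=["crypto", "ssl"], uselib_store="libs")

def build(bld):
    # 'use' pulls in the link flags stored by the configure check
    bld.program(source="main.cpp", target="my-app", use=["libs"])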
I added Crypto++ (CRYPTOPP) instead of crypto, because I could do the same work with Crypto++. This is how it worked for me.
The code below needs to be added/edited in the wscript found in the ns-3.xx directory.
def configure(conf):
    # other parameters removed
    env = conf.env
    conf.env['cryptopp'] = conf.check(mandatory=True, lib='cryptopp', uselib_store='CRYPTOPP')
    conf.env['sph'] = conf.check(mandatory=True, lib='sph', uselib_store='SPH')
    conf.env.append_value('CXXDEFINES', 'ENABLE_CRYPTOPP')
    conf.env.append_value('CCDEFINES', 'ENABLE_CRYPTOPP')
    conf.env['lssl'] = conf.check(mandatory=True, lib='ssl', uselib_store='OPENSSL')
    conf.env.append_value('CXXDEFINES', 'ENABLE_SSL')
    conf.env.append_value('CCDEFINES', 'ENABLE_SSL')
    # ...
def create_ns3_program(bld, name, dependencies=('core',)):
    program = bld(features='cxx cxxprogram')
    # other parameters removed
    program.use = program.ns3_module_dependencies
    if program.env['ENABLE_STATIC_NS3']:
        if sys.platform == 'darwin':
            program.env.STLIB_MARKER = '-Wl,-all_load,-lcryptopp,-lsph'
        else:
            program.env.STLIB_MARKER = '-Wl,-Bstatic,--whole-archive,-lcryptopp,-lsph'
            program.env.SHLIB_MARKER = '-Wl,-Bdynamic,--no-whole-archive,-lcryptopp,-lsph'
    else:
        if program.env.DEST_BINFMT == 'elf':
            # All ELF platforms are impacted, but only the gcc compiler has a flag to fix it.
            if 'gcc' in (program.env.CXX_NAME, program.env.CC_NAME):
                program.env.append_value('SHLIB_MARKER', '-Wl,--no-as-needed,-lcryptopp,-lsph')
    return program

# ...
def add_scratch_programs(bld):
    all_modules = [mod[len("ns3-"):] for mod in bld.env['NS3_ENABLED_MODULES']]
    try:
        for filename in os.listdir("scratch"):
            if filename.startswith('.') or filename == 'CVS':
                continue
            if os.path.isdir(os.path.join("scratch", filename)):
                obj = bld.create_ns3_program(filename, all_modules)
                obj.path = obj.path.find_dir('scratch').find_dir(filename)
                obj.source = obj.path.ant_glob('*.cc')
                obj.target = filename
                obj.name = obj.target
                obj.install_path = None
                # Add the parameter below (space-separated, so that
                # later assignments do not overwrite earlier ones)
                obj.uselib = 'CRYPTOPP SPH OPENSSL'
            elif filename.endswith(".cc"):
                name = filename[:-len(".cc")]
                obj = bld.create_ns3_program(name, all_modules)
                obj.path = obj.path.find_dir('scratch')
                obj.source = filename
                obj.target = name
                obj.name = obj.target
                obj.install_path = None
                # Add the parameter below
                obj.uselib = 'CRYPTOPP SPH OPENSSL'
    except OSError:
        return
This basically adds all the parameters at build time to the simulation program you are running from the scratch directory.
PS: This script is specific to ns-3.
I am running a Vertex AI batch prediction job using the Python API.
The function I am using is from the Google Cloud docs:
def create_batch_prediction_job_dedicated_resources_sample(
    key_path,
    project: str,
    location: str,
    model_display_name: str,
    job_display_name: str,
    gcs_source: Union[str, Sequence[str]],
    gcs_destination: str,
    machine_type: str = "n1-standard-2",
    sync: bool = True,
):
    credentials = service_account.Credentials.from_service_account_file(key_path)
    # Initialize the aiplatform SDK
    aiplatform.init(project=project, location=location, credentials=credentials)
    # Get a list of models by model name
    models = aiplatform.Model.list(filter=f'display_name="{model_display_name}"')
    model_resource_name = models[0].resource_name
    # Get the model
    my_model = aiplatform.Model(model_resource_name)
    batch_prediction_job = my_model.batch_predict(
        job_display_name=job_display_name,
        gcs_source=gcs_source,
        gcs_destination_prefix=gcs_destination,
        machine_type=machine_type,
        sync=sync,
    )
    # batch_prediction_job.wait_for_resource_creation()
    batch_prediction_job.wait()
    print(batch_prediction_job.display_name)
    print(batch_prediction_job.resource_name)
    print(batch_prediction_job.state)
    return batch_prediction_job
datetime_today = datetime.datetime.now()
model_display_name = 'test_model'
key_path = 'vertex_key.json'
project = 'my_project'
location = 'asia-south1'
job_display_name = 'batch_prediction_' + str(datetime_today)
model_name = '1234'
gcs_source = 'gs://my_bucket/Cleaned_Data/user_item_pairs.jsonl'
gcs_destination = 'gs://my_bucket/prediction'
create_batch_prediction_job_dedicated_resources_sample(key_path, project, location,
                                                       model_display_name, job_display_name,
                                                       gcs_source, gcs_destination)
OUTPUT:
92 current state:
JobState.JOB_STATE_RUNNING
INFO:google.cloud.aiplatform.jobs:BatchPredictionJob projects/my_project/locations/asia-south1/batchPredictionJobs/37737350127597649
The above output is printed to the terminal over and over, every few seconds.
The issue I have is that the Python program calling this function keeps running until it is force-stopped. I have tried both batch_prediction_job.wait() and batch_prediction_job.wait_for_resource_creation(), with the same result.
How do I start a batch_prediction_job without waiting for it to complete, so that the program terminates just after the job has been created?
I gave you the wrong instruction in the comments: change the parameter to sync=False, and the function should return right after it is executed. From the parameter's documentation:
"Whether this function call should be synchronous (wait for pipeline run to finish before terminating) or asynchronous (return immediately)"
sync=False
def create_batch_prediction_job_dedicated_resources_sample(
    # ...
    sync: bool = False,
):
UPDATE - adding more details:
Check my notebook code, where I tested it and it's working:
You have to change sync=False AND remove/comment the following lines:
#batch_prediction_job.wait()
#print(batch_prediction_job.display_name)
#print(batch_prediction_job.resource_name)
#print(batch_prediction_job.state)
Your code edited:
def create_batch_prediction_job_dedicated_resources_sample(
    key_path,
    project: str,
    location: str,
    model_display_name: str,
    job_display_name: str,
    gcs_source: Union[str, Sequence[str]],
    gcs_destination: str,
    machine_type: str = "n1-standard-2",
    sync: bool = False,
):
    credentials = service_account.Credentials.from_service_account_file(key_path)
    # Initialize the aiplatform SDK
    aiplatform.init(project=project, location=location, credentials=credentials)
    # Get a list of models by model name
    models = aiplatform.Model.list(filter=f'display_name="{model_display_name}"')
    model_resource_name = models[0].resource_name
    # Get the model
    my_model = aiplatform.Model(model_resource_name)
    batch_prediction_job = my_model.batch_predict(
        job_display_name=job_display_name,
        gcs_source=gcs_source,
        gcs_destination_prefix=gcs_destination,
        machine_type=machine_type,
        sync=sync,
    )
    return batch_prediction_job
datetime_today = datetime.datetime.now()
model_display_name = 'test_model'
key_path = 'vertex_key.json'
project = '<my_project_name>'
location = 'asia-south1'
job_display_name = 'batch_prediction_' + str(datetime_today)
model_name = '1234'
gcs_source = 'gs://<my_bucket_name>/Cleaned_Data/user_item_pairs.jsonl'
gcs_destination = 'gs://<my_bucket_name>/prediction'
create_batch_prediction_job_dedicated_resources_sample(key_path,
                                                       project, location,
                                                       model_display_name,
                                                       job_display_name,
                                                       gcs_source,
                                                       gcs_destination,
                                                       sync=False)
Results with sync=False: (screenshot omitted)
Results with sync=True: (screenshot omitted)
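If you later need to check on the job without blocking, you can reattach to it by resource name. A minimal sketch, assuming aiplatform.init() has been called as above; the resource name is the one the SDK logged at creation time:

from google.cloud import aiplatform

# Reattach to the job created earlier, using the resource name
# printed as batch_prediction_job.resource_name at creation time.
job = aiplatform.BatchPredictionJob(
    "projects/my_project/locations/asia-south1/batchPredictionJobs/37737350127597649"
)

# Read the current state without waiting for completion
print(job.state)  # e.g. JobState.JOB_STATE_RUNNING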
I have a project with Django. On the host, when I want to upload an image, an error sometimes occurs (only with specific images). The code below shows how I resize uploaded images:
def save_files_to_media(request, is_public=False, klass=None, conversation=None):
    from apps.file.models import File
    fs = FileSystemStorage()
    file_items = {}
    for data_item in request.data:
        file_match = re.search(r'^fileToUpload\[(\d+)\]$', data_item)
        if file_match and file_match.groups():
            item_index = file_match.groups()[0]
            if item_index not in file_items:
                file_items[item_index] = {}
            file_items[item_index]['file_to_upload'] = request.data[data_item]
        else:
            optimize_match = re.search(r'^optimizeType\[(\d+)\]$', data_item)
            if optimize_match and optimize_match.groups():
                item_index = optimize_match.groups()[0]
                if item_index not in file_items:
                    file_items[item_index] = {}
                file_items[item_index]['optimize_type'] = request.data[data_item]
    files = []
    for file_item_key in file_items:
        input_file = file_items[file_item_key]['file_to_upload']
        # TODO: checking validation. if input_file.name is not exist
        optimize_type = file_items[file_item_key].get('optimize_type')
        file_uuid = str(uuid4())
        if is_public:
            orig_filename, file_ext = splitext(basename(input_file.name))
            directory_name = join(settings.MEDIA_ROOT, file_uuid)
            filename = file_uuid + file_ext
        else:
            directory_name = join(settings.MEDIA_ROOT, file_uuid)
            mkdir(directory_name)
            filename = input_file.name
        filepath = join(directory_name, filename)
        fs.save(filepath, input_file)
        is_optimized = False
        if optimize_type == 'image':
            is_success, filepath = image_optimizer(filepath)
            filename = basename(filepath)
            is_optimized = is_success
        file_obj = File(
            orig_name=filename,
            uuid=file_uuid,
            md5sum=get_md5sum(filepath),
            filesize=get_filesize(filepath),
            meta=get_meta_info(filepath),
            is_optimized=is_optimized,
            creator=request.user
        )
        if is_public:
            file_obj.is_public = True
        else:
            file_obj.klass = klass
            file_obj.conversation = conversation
        file_obj.save()
        files.append(file_obj)
    return files
Here is the error I get with some images:
unsupported Unicode escape sequence
LINE 1: ..., 'ada90ead20f7994837dced344266cc51', 145216, '', '{"FileTyp...
^
DETAIL: \u0000 cannot be converted to text.
CONTEXT: JSON data, line 1: ...ecTimeDigitized": 506779, "MakerNoteUnknownText":
The funny thing is that it works locally but not on the host. For more information: my local PostgreSQL version is 11.3, while the host's PostgreSQL is 9.5.17. Where do you think the problem is? From the error, it seems PostgreSQL-related. Thank you.
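For reference, the error says the extracted metadata contains \u0000 (NUL) characters, which PostgreSQL cannot store in text or JSON columns. A minimal sketch of one possible workaround, stripping NULs before the File object is built; this assumes get_meta_info returns a JSON-serializable structure, as the code above suggests:

def strip_nul_chars(value):
    # Recursively remove NUL (U+0000) characters, which PostgreSQL
    # cannot store in text or JSON columns.
    if isinstance(value, str):
        return value.replace('\x00', '')
    if isinstance(value, dict):
        return {strip_nul_chars(k): strip_nul_chars(v) for k, v in value.items()}
    if isinstance(value, list):
        return [strip_nul_chars(v) for v in value]
    return value

# Hypothetical change inside save_files_to_media:
#     meta=strip_nul_chars(get_meta_info(filepath)),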
Hi, I'm wondering whether it is possible (and how) to update a single model object's name, by its id, inside a for loop, using the objects.filter(pk=id).update(name='name') function.
I tried this, but it's not working inside the for loop. It works only outside the loop.
EDIT
My edit view with the for loop:
First I prepare data based on my database. The data can be modified by other methods, so I keep it in a global list, schedule_table.
When saving the project object, I want to update the other data, represented by schedule_table, using a for loop.
def ProjectEditView(request, pk):
    project = get_object_or_404(Project, pk=17)
    schedule_table_load_form_db(project)  # prepare the list with data
    Task_Schedule_TableView(request)
    project_form = ProjectForm(request.POST or None, instance=project)
    project_form_valid = project_form.is_valid()
    if project_form_valid:
        with transaction.atomic():
            # save form in DB
            project = project_form.save(commit=False)
            project.save()
            # modify other objects based on the changed list schedule_table
            if len(schedule_table) > 0:
                # my for loop
                for p in schedule_table:
                    team = Team.objects.get(name=p.team)
                    phase = Phase.objects.filter(pk=p.pk).update(name=p.name,
                                                                 project=project,
                                                                 team=team,
                                                                 order=p.order,
                                                                 duration=p.duration,
                                                                 prev=p.prev,
                                                                 start=p.start,
                                                                 end=p.end)
            # some other stuff... outside the loop
        return redirect('Project:ProjectListView')
    context = {'project_form': project_form}
    return render(request, 'Project/test.html', context)
The function that prepares the list with data:
def schedule_table_load_form_db(project):
    global schedule_table
    schedule_table = []
    phases = Phase.objects.filter(project=project)
    for phase in phases:
        tasks_list = Task.objects.filter(phase=phase)
        tasks = []
        for task in tasks_list:
            task_dict = Task_dictionary.objects.get(pk=task.task_dictionary.pk)
            tmp = ''
            for e in task.employers.employer.all():
                user = User.objects.get(pk=e.user_id)
                tmp += user.first_name + ' ' + user.last_name + ','
            tasks.append(TableTask(pk=task.pk,
                                   order=task.order,
                                   name=task.name,
                                   duration=task.duration,
                                   employer=tmp,
                                   start=task.start,
                                   end=task.end,
                                   min_employers_nr=task_dict.min_employers_nr,
                                   max_employers_nr=task_dict.max_employers_nr,
                                   prev=task.prev_task,
                                   ))
        schedule_table.append(TablePhase(pk=phase.pk,
                                         name=phase.name,
                                         duration=0.0,
                                         start=phase.start,
                                         end=phase.end,
                                         team=phase.team.name,
                                         task=tasks,
                                         order=len(schedule_table),
                                         prev=phase.prev
                                         ))
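For reference, the filter(pk=...).update(...) pattern itself does work inside a loop; each call issues an immediate UPDATE query. A minimal self-contained sketch, assuming a Phase model with a name field as in the question (the helper and its arguments are illustrative only):

from django.db import transaction

def rename_phases(items):
    # items: iterable of (pk, new_name) pairs
    with transaction.atomic():
        for pk, new_name in items:
            # update() executes an UPDATE immediately and returns
            # the number of rows matched (0 if the pk does not exist)
            updated = Phase.objects.filter(pk=pk).update(name=new_name)
            if updated == 0:
                print('no Phase with pk', pk)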
My scenario goes like this:
wscript:
def options(opt):
    opt.load('compiler_c')
    opt.load('compiler_cxx')

def configure(ctx):
    ctx.load('compiler_c')
    ctx.load('compiler_cxx')

def build(ctx):
    ctx.objects(source="file0.c",
                target="cFiles")
    ctx.objects(source="file1.cpp file2.cpp",
                target="cxxFiles")
    ctx.stlib(source="???",
              target="test")
How do I call 'ctx.stlib()'?
ctx(features='c cxx cxxstlib', use='cFiles cxxFiles', target='test')
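Put in context, the build function from the question would then look like this (same sources and targets; only the stlib call changes):

def build(ctx):
    ctx.objects(source="file0.c", target="cFiles")
    ctx.objects(source="file1.cpp file2.cpp", target="cxxFiles")
    # A generic task generator: the 'c cxx cxxstlib' features link the
    # reused object files into a static library without compiling new sources.
    ctx(features='c cxx cxxstlib', use='cFiles cxxFiles', target='test')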
I am a newbie with Django. Although I no longer use pdb for debugging (I used it before and removed all pdb calls), I get the BdbQuit error with the following explanation:
/home/mastersnack/dpt2/app/views.py in start
version = request.session['version']
My related code is:
def start(request):
    global Big_Matrix
    if 'version' in request.session:
        version = request.session['version']
    else:
        try:
            version = Version.objects.order_by('-id')[0]
        except IndexError:
            return redirect('/')
    if not request.user.id:
        user = User(guest=True, date_joined=datetime.datetime.now(),
                    last_login=datetime.datetime.now())
        user.save()
        request.session["USER_ID"] = user.id
        return redirect('/start')
    rnd = Round(version=version, user=request.user, date=datetime.datetime.now())
    rnd.save()
    products = Product.objects.filter(version=version, selectable=True)
    (a, b, c) = random.sample(products, 3)
    poll = Poll(round=rnd, product_a=a, product_b=b, product_c=c,
                date=datetime.datetime.now())
    # (a, b, c) = get_meals(Big_Matrix, [])
    # poll = Poll(round=rnd,
    #             product_a=Product.objects.get(stimuliNum=a, version=version),
    #             product_b=Product.objects.get(stimuliNum=b, version=version),
    #             product_c=Product.objects.get(stimuliNum=c, version=version),
    #             date=datetime.datetime.now())
    poll.save()
    # (a, b, c) = get_meals(BigMatrix.objects.get(version=version).matrix, [])  # bad_functions)
    # poll = Poll(round=rnd, product_a=Product.objects.get(oid=a, version=version), product_b=Product.objects.get(oid=b, version=version), product_c=Product.objects.get(oid=c, version=version), date=datetime.datetime.now())
    # poll.save()
    return redirect('/preferences')
Should I check my code again, in case I forgot to remove a call?
Make sure that you have removed all pdb calls (pdb.set_trace() in particular) and that you saved your files; searching the project for "pdb" helps, as in the sketch below.
After that, if the Django server is running, restart it.
That should do the trick.
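A quick sketch for locating leftover pdb references in a project tree (plain Python, nothing Django-specific; run it from the project root):

import os

for root, _, files in os.walk('.'):
    for name in files:
        if not name.endswith('.py'):
            continue
        path = os.path.join(root, name)
        with open(path, encoding='utf-8', errors='ignore') as fh:
            for lineno, line in enumerate(fh, 1):
                # Flags imports and set_trace() calls alike
                if 'pdb' in line:
                    print(f'{path}:{lineno}: {line.rstrip()}')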