I have the following conditions:
1. stackCreate
2. stackUpdate
3. stackDelete
What I am trying to do: while a stackCreate/Update/Delete is in progress, I need to check on its status. How can I do that? I know of two ways:
1. openstack stack event list
2. The Python code below.
stack_id = str(hc.stacks.get(stack_name).id)
hc.stacks.delete(stack_id=stack_id)
try:
    evntsdata = hc.events.list(stack_name)[0].to_dict()
    event_handle = evntsdata['resource_status']
    if event_handle == 'DELETE_IN_PROGRESS':
        loopcontinue = True
        while loopcontinue:
            evntsdata = hc.events.list(stack_name)[0].to_dict()
            event_handle = evntsdata['resource_status']
            if event_handle == 'DELETE_COMPLETE':
                loopcontinue = False
                print(str(timestamp()) + " " + "Delete is Completed!")
            elif event_handle == 'DELETE_FAILED':
                print("Failed")  # this needs a proper error msg
                sys.exit(0)
            else:
                print(str(timestamp()) + " " + "Delete in Progress!")
                time.sleep(5)
    elif event_handle == 'DELETE_COMPLETE':
        print(str(timestamp()) + " " + "Delete is Completed!")
        sys.exit(0)
    elif event_handle == 'DELETE_FAILED':
        print("Failed")
        sys.exit(0)
except AttributeError as e:
    print(str(timestamp()) + " " + "ERROR: Stack Delete Failure")
    raise
except (RuntimeError, heatclient.exc.NotFound):
    print("Stack doesnt exist:", stack_name)
The first method is a shell command, and I am not very good with shell (or, let's say, I don't know how to best integrate the shell command into Python).
The problem with both methods is that I need all these steps just to work out whether the stack delete succeeded, and I repeat the same logic for stack update and create, which I don't think is best practice. Does anyone have an idea how I can minimize this logic? Any help is greatly appreciated.
You can write simple functions to create/update/delete a stack and also to check the stack's status.
Please see the sample code below, which creates a stack and polls its status.
import time  # needed for time.sleep() in poll_stack_status

from keystoneauth1 import loading
from keystoneauth1 import session
from heatclient import client

tenant_id = 'ab3fd9ca29e149acb25161ec8053da9c'
heat_url = 'http://10.26.12.31:8004/v1/%s' % tenant_id
auth_token = 'gAAAAABZYxfjz88XNXnfoCPkNLVeVtqtJ9o8qEtgFhI2GJ-ewSCuiypdwt3K5evgQeICVRqMa2jXgzVlENAUB19ZNyQfVCxSX4_lMBKyChM76SGuQUP8U-xJ9EKIfFaVwRGBkk4Ow9OO-iNINfMs0B5-LzJvxTFybi8yZw4EiagQpNpfu1onYfc'

heat = client.Client('1', endpoint=heat_url, token=auth_token)


def create_stack(stack_file_path, stack_name, parameters=None):
    template = open(stack_file_path)
    if parameters:
        stack = heat.stacks.create(stack_name=stack_name, template=template.read(), parameters=parameters)
    else:
        stack = heat.stacks.create(stack_name=stack_name, template=template.read())
    template.close()
    return stack


def get_stack_status(stack_id):
    stack = heat.stacks.get(stack_id)
    return stack.stack_status


def poll_stack_status(stack_id, poll_time=5):
    stack_status = get_stack_status(stack_id)
    while stack_status != 'CREATE_COMPLETE':
        if stack_status == 'CREATE_FAILED':
            return 1
        time.sleep(poll_time)
        stack_status = get_stack_status(stack_id)
    return 0
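For completeness, a minimal usage sketch of the helpers above (the template path my_stack.yaml and the stack name are hypothetical; recent heatclient versions return the new stack's id under ['stack']['id']):

# hypothetical usage of create_stack/poll_stack_status from above
stack = create_stack('my_stack.yaml', 'my_stack')   # 'my_stack.yaml' is a placeholder template path
stack_id = stack['stack']['id']                      # id of the newly created stack
if poll_stack_status(stack_id) == 0:
    print('Stack created successfully')
else:
    print('Stack creation failed')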
I went with the following for now. It's not the best, I think, but it satisfies what I need to do.
def stackStatus(status):
    evntsdata = hc.events.list(stack_name)[0].to_dict()
    event_handle = evntsdata['resource_status'].split("_")
    event_handle = '_'.join(event_handle[1:])
    if event_handle == 'IN_PROGRESS':
        loopcontinue = True
        while loopcontinue:
            evntsdata = hc.events.list(stack_name)[0].to_dict()
            event_handle = evntsdata['resource_status'].split("_")
            event_handle = '_'.join(event_handle[1:])
            if event_handle == 'COMPLETE':
                loopcontinue = False
                print(str(timestamp()) + status + " IS COMPLETED!")
            elif event_handle == 'FAILED':
                print("Failed")
                exit(1)
            else:
                print(str(timestamp()) + status + " IN PROGRESS!")
                time.sleep(5)
Call this function:
stackStatus("DELETE")
stackStatus("CREATE")
stackStatus("UPDATE")
I'm trying to use the Paho MQTT client and multiprocessing to publish the temperature at a defined interval. However, the publish command is not working inside the class. I've checked self.mqtt_client inside scheduler and it does hold the Client object.
Can anyone point out the problem for me?
Everything inside the class works except scheduler.
def scheduler(self, topic, interval):
    if interval != 0:
        while True:
            if topic == "temp":
                print("Temperature published " + interval)  # It's working.
                self.mqtt_client.publish(topic, interval, 0, False)  # There is no error/output about this line
                time.sleep(int(interval))
    else:
        pass
Class:
class Switcher:
    config = None
    mqtt_client = None
    mqtt_connected = False

    switches = {}
    stages = {}

    def __init__(self, config):
        self.config = config
        for switch_cfg in self.config['switches']:
            self.switches[switch_cfg['topic_set']] = Switch(int(switch_cfg['gpio']), switch_cfg['topic_status'], switch_cfg['initial'])

    def scheduler(self, topic, interval):
        if interval != 0:
            while True:
                if topic == "temp":
                    print("Temperature published " + interval)  # It's working.
                    self.mqtt_client.publish(topic, interval, 0, False)  # There is no error/output about this line
                    time.sleep(int(interval))
        else:
            pass

    def mqtt_connect(self):
        if self.mqtt_broker_reachable():
            self.verbose('Connecting to ' + self.config['mqtt_host'] + ':' + self.config['mqtt_port'])
            self.mqtt_client = mqtt.Client(self.config['mqtt_client_id'])
            if 'mqtt_user' in self.config and 'mqtt_password' in self.config:
                self.mqtt_client.username_pw_set(self.config['mqtt_user'], self.config['mqtt_password'])

            self.mqtt_client.on_connect = self.mqtt_on_connect
            self.mqtt_client.on_disconnect = self.mqtt_on_disconnect
            self.mqtt_client.on_message = self.mqtt_on_message
            try:
                self.mqtt_client.connect(self.config['mqtt_host'], int(self.config['mqtt_port']), 10)
                for switch_cfg in self.config['switches']:
                    self.mqtt_client.subscribe(switch_cfg['topic_set'], 0)
                self.mqtt_client.loop_forever()
            except:
                self.error(traceback.format_exc())
                self.mqtt_client = None
        else:
            self.error(self.config['mqtt_host'] + ':' + self.config['mqtt_port'] + ' not reachable!')

    def mqtt_on_connect(self, mqtt_client, userdata, flags, rc):
        self.mqtt_connected = True
        for switch_ios in self.config['switches']:
            self.mqtt_client.publish(self.config['station_status'], "available", 0, False)
            self.mqtt_client.publish(switch_ios['topic_status'], self.switches[switch_ios['topic_set']].get_state(), 0, False)
        temp_interval = 1
        temp_process = multiprocessing.Process(target=self.scheduler, args=("temp", str(temp_interval),))
        temp_process.start()
        self.verbose('...mqtt_connected!')

    def mqtt_broker_reachable(self):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(5)
        try:
            s.connect((self.config['mqtt_host'], int(self.config['mqtt_port'])))
            s.close()
            return True
        except socket.error:
            return False

    def start(self):
        self.mqtt_connect()
Your mqtt_connect function will never return.
self.mqtt_client.loop_forever() will block until self.mqtt_client.disconnect() is called.
You should probably be using self.mqtt_client.loop_start(), which will run the client loop on its own thread in the background. You can call self.mqtt_client.loop_stop() when you want to shut the client down.
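For illustration, a rough sketch of what that change could look like, reusing the attributes and config keys from the class above (not a drop-in replacement):

def mqtt_connect(self):
    if self.mqtt_broker_reachable():
        self.mqtt_client = mqtt.Client(self.config['mqtt_client_id'])
        self.mqtt_client.on_connect = self.mqtt_on_connect
        self.mqtt_client.on_disconnect = self.mqtt_on_disconnect
        self.mqtt_client.on_message = self.mqtt_on_message
        self.mqtt_client.connect(self.config['mqtt_host'], int(self.config['mqtt_port']), 10)
        for switch_cfg in self.config['switches']:
            self.mqtt_client.subscribe(switch_cfg['topic_set'], 0)
        self.mqtt_client.loop_start()   # network loop runs on a background thread, so this call returns

def stop(self):
    if self.mqtt_client:
        self.mqtt_client.loop_stop()    # stop the background network thread
        self.mqtt_client.disconnect()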
" i am trying to read data from the serial connection and doing some stuff if it matches my string but its giving me errors when i close the serial connection port"
" for some reason i do not see this error if i use the serial.readline() method "
import time
import serial
from Queue import Queue
from threading import Thread


class NonBlocking:
    def __init__(self, serial_connection, radio_serial_connection):
        self._s = serial_connection
        self._q = Queue()
        self.buf = bytearray()

        def _populateQueue(serial_connection, queue):
            if type(serial_connection) == str:
                return
            self.s = serial_connection
            while True:
                i = self.buf.find(b"\n")
                if i >= 0:
                    r = self.buf[:i + 1]
                    self.buf = self.buf[i + 1:]
                    queue.put(r)
                while True:
                    i = max(1, min(2048, self.s.in_waiting))
                    data = self.s.read(i)
                    i = data.find(b"\n")
                    if i >= 0:
                        r = self.buf + data[:i + 1]
                        self.buf[0:] = data[i + 1:]
                        a = r.split('\r\n')
                        for item in a:
                            if item:
                                queue.put(item)
                    else:
                        self.buf.extend(data)

        self._t = Thread(target=_populateQueue, args=(self._s, self._q))
        self._t.daemon = True
        self._t.start()

    def read_all(self, timeout=None):
        data = list()
        if self._q.empty():
            pass
        while not self._q.empty():
            data.append(self._q.get(block=timeout is not None, timeout=timeout))
        return data


class SerialCommands:
    def __init__(self, port, baudrate):
        self.serial_connection = serial.Serial(port, baudrate)
        self.queue_data = NonBlocking(self.serial_connection, '')

    def read_data(self):
        returned_info = self.queue_data.read_all()
        return returned_info

    def close_q(self):
        self.serial_connection.close()


class qLibrary:
    def __init__(self):
        self.q = None
        self.port = None

    def close_q_connection(self):
        self.q.close_q()

    def establish_connection_to_q(self, port, baudrate=115200, delay=2):
        self.delay = int(delay)
        self.port = port
        try:
            if not self.q:
                self.q = SerialCommands(self.port, int(baudrate))
        except IOError:
            raise AssertionError('Unable to open {0}'.format(port))

    def verify_event(self, data, timeout=5):
        timeout = int(timeout)
        data = str(data)
        # print data
        while timeout:
            try:
                to_analyze = self.q.read_data()
                for item in to_analyze:
                    print "item: ", item
                    if str(item).find(str(data)) > -1:
                        print "Found data: '{0}' in string: '{1}'".format(data, item)
            except:
                pass
            time.sleep(1)
            timeout -= 1


if __name__ == '__main__':
    q1 = qLibrary()
    q1.establish_connection_to_q('COM5')
    q1.verify_event("ATE")
    q1.close_q_connection()
" i expect the code to close the serial connection without any exceptions or errors "
the output is
Exception in thread Thread-1:
Traceback (most recent call last):
  File "C:\Python27\Lib\threading.py", line 801, in __bootstrap_inner
    self.run()
  File "C:\Python27\Lib\threading.py", line 754, in run
    self.__target(*self.__args, **self.__kwargs)
  File "C:/Program Files (x86)/serialtest1.py", line 27, in _populateQueue
    data = self.s.read(i)
  File "C:\Program Files (x86)\venv\lib\site-packages\serial\serialwin32.py", line 283, in read
    ctypes.byref(self._overlapped_read))
TypeError: byref() argument must be a ctypes instance, not 'NoneType'
If you define your serial port with no timeout it will get the default setting, timeout=None, which means that when you call serial.read(x) the call will block until it has read x bytes.
If you never get those x bytes, your code will sit there waiting forever, or at least until enough additional data arrives on the buffer to bring the total number of bytes received up to x.
If you mix that up with threading, I'm afraid you are quite likely closing the port while you are trying to read.
You can probably fix this issue just by defining a sensible read timeout on your port or by changing the way you read. The general advice is to set a timeout that works for your application and read at least the maximum number of bytes you expect. Reading your code, that seems to be what you wanted to do; if so, you forgot to set the timeout.
If you have a reason not to set a timeout, or you want to keep your reading routine as it is, you can make your code work by cancelling the read before closing. You can do that with serial.cancel_read().
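For example, both options might look roughly like this (port name, baud rate and the 1-second timeout are arbitrary; cancel_read() requires pyserial 3.1 or later):

import serial

# Option 1: open the port with a read timeout so read() can return on its own
ser = serial.Serial('COM5', 115200, timeout=1)   # read() gives up after at most 1 second

# Option 2: keep the blocking read, but cancel it just before closing the port
ser.cancel_read()   # interrupts a read() that is blocked in another thread (pyserial 3.1+)
ser.close()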
Using ruamel.yaml, the output of roundtrip parsing of the YAML
a: {b: }
is
a: {b: !!null ''}
Is there any way to preserve the empty value, or override the None representer to output an empty value as above?
This is not trivial, and I am not sure the solution presented below is free of adverse side-effects.
The cause of the non-triviality has to do with several things:
You are using the less readable flow style
a: {b: }
instead of block style:
a:
b:
The latter round-trips without a change
The empty value for key b loads as None, which I have not been able to subclass in ruamel.yaml, and hence style information cannot be attached to that value, and you have to rely on the "default" emitter (which in your case doesn't do what you want).
This
a: {b:}
is entirely different from your
a: {b: }
and currently the emitter plays it safe and inserts tag information into the stream.
With that background information, you can force the style of the representation for None to the empty string and based on that hack the emitter:
import sys
import ruamel.yaml

yaml_str = """\
a: {b: }
"""


class MyEmitter(ruamel.yaml.emitter.Emitter):
    def choose_scalar_style(self):
        # override selection for 'null'
        if self.analysis is None:
            self.analysis = self.analyze_scalar(self.event.value)
        if self.event.style == '"' or self.canonical:
            return '"'
        if (not self.event.style or self.event.style == '?') and \
           (self.event.implicit[0] or not self.event.implicit[2]):
            if (not (self.simple_key_context and
                     (self.analysis.empty or self.analysis.multiline)) and
                    (self.flow_level and self.analysis.allow_flow_plain or
                     (not self.flow_level and self.analysis.allow_block_plain))):
                return ''
        if (self.event.style == '') and self.event.tag == 'tag:yaml.org,2002:null' and \
           (self.event.implicit[0] or not self.event.implicit[2]):
            if self.flow_level and not self.analysis.allow_flow_plain:
                return ''
            self.analysis.allow_block = True
        if self.event.style and self.event.style in '|>':
            if (not self.flow_level and not self.simple_key_context and
                    self.analysis.allow_block):
                return self.event.style
        if not self.event.style and self.analysis.allow_double_quoted:
            if "'" in self.event.value or '\n' in self.event.value:
                return '"'
        if not self.event.style or self.event.style == '\'':
            if (self.analysis.allow_single_quoted and
                    not (self.simple_key_context and self.analysis.multiline)):
                return '\''
        return '"'

    def process_scalar(self):
        # if style '' and tag is 'null' insert empty space
        if self.analysis is None:
            self.analysis = self.analyze_scalar(self.event.value)
        if self.style is None:
            self.style = self.choose_scalar_style()
        split = (not self.simple_key_context)
        if self.sequence_context and not self.flow_level:
            self.write_indent()
        if self.style == '"':
            self.write_double_quoted(self.analysis.scalar, split)
        elif self.style == '\'':
            self.write_single_quoted(self.analysis.scalar, split)
        elif self.style == '>':
            self.write_folded(self.analysis.scalar)
        elif self.style == '|':
            self.write_literal(self.analysis.scalar)
        elif self.event.tag == 'tag:yaml.org,2002:null':
            self.stream.write(u' ')  # not sure if this doesn't break other things
        else:
            self.write_plain(self.analysis.scalar, split)
        self.analysis = None
        self.style = None
        if self.event.comment:
            self.write_post_comment(self.event)


class MyRepresenter(ruamel.yaml.representer.RoundTripRepresenter):
    def represent_none(self, data):
        if len(self.represented_objects) == 0 and not self.serializer.use_explicit_start:
            # this will be open ended (although it is not yet)
            return self.represent_scalar(u'tag:yaml.org,2002:null', u'null')
        return self.represent_scalar(u'tag:yaml.org,2002:null', u'', style='')


MyRepresenter.add_representer(type(None),
                              MyRepresenter.represent_none)

yaml = ruamel.yaml.YAML()
yaml.Emitter = MyEmitter
yaml.Representer = MyRepresenter

data = yaml.load(yaml_str)
yaml.dump(data, sys.stdout)
which gives:
a: {b: }
I have a reset_company_limits module for updating a company's limits; this task runs daily in the morning. But somehow some company limits are not getting updated, and I cannot find any error in my code.
def reset_company_limits(comp_id=None, reset_type=['daily', 'monthly']):
    if comp_id:
        company_accounts = CompanyAccount.objects.filter(id=comp_id, enabled=1)
    else:
        company_accounts = CompanyAccount.objects.filter(enabled=1)

    reset, rcache, cache_rec = ({} for l in xrange(3))
    error = []

    for rtype in reset_type:
        for account in company_accounts:
            try:
                counts = account.get_feature_reset_counts(feature_type=reset_type)
                limits = account.get_limits()
                recruiters = account.recruiter_set.filter(is_active=True).values('pk')

                for limit in limits:
                    key = RECRUITER_LOOKUP[limit.type]
                    if key in counts[rtype]:
                        if rtype == 'daily':
                            limit.daily = counts[rtype][key]
                        elif rtype == 'monthly':
                            limit.monthly = counts[rtype][key]
                        limit.save()
            except Exception, e:
                error.append(account.pk)
                print "Error occurred for company id %s :: %s" % (account.pk, e)

    return reset, rcache, cache_rec, error
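Since the except block only prints and the task runs from a daily cron, those messages may never be seen; one way to surface which accounts fail and why is to log the full traceback, for example (a rough sketch, where update_account_limits is a hypothetical stand-in for the body of the inner loop):

import logging
import traceback

logger = logging.getLogger(__name__)

def process_account(account, rtype):
    # hypothetical wrapper around the per-account work in reset_company_limits
    try:
        update_account_limits(account, rtype)  # stand-in for the limit-updating code above
        return True
    except Exception:
        # record which company failed and why, so the cause shows up in the logs
        logger.error("reset_company_limits failed for company id %s (%s)", account.pk, rtype)
        logger.error(traceback.format_exc())
        return False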
I am making a simple math test for my friend's class. The students will only have 45 seconds to answer each question. Is there a way to make a timer that counts while the rest of the code runs and cuts the question off when it reaches 45 seconds?
The test looks like this:
test = raw_input("How much is 62x5-23?")
if test == '287':
    print "Well done!"
Here's some code I used once (lifted off some page on the web which is now in the hands of a domain squatter, so no credit where credit is due, sadly):
import signal

class TimeoutException(Exception):
    pass

def timeout(timeout_time, default=None):
    def timeout_function(f):
        def f2(*args, **kwargs):
            def timeout_handler(signum, frame):
                raise TimeoutException()
            old_handler = signal.signal(signal.SIGALRM, timeout_handler)
            signal.alarm(timeout_time)  # trigger alarm in timeout_time seconds
            try:
                retval = f(*args, **kwargs)
            except TimeoutException, e:
                if default == None:
                    raise e
                return default
            finally:
                signal.signal(signal.SIGALRM, old_handler)
                signal.alarm(0)
            return retval
        return f2
    return timeout_function
# use like this:
@timeout(45)
def run():
    test = raw_input("How much is 62x5-23? ")
    if test == '287':
        print "Well done!"

# alternatively, pass a value that will be returned when the timeout is reached:
@timeout(45, False)
def run2():
    test = raw_input("How much is 62x5-23? ")
    if test == '287':
        print "Well done!"

if __name__ == '__main__':
    try:
        run()
    except TimeoutException:
        print "\nSorry, you took too long."

    # alternative call:
    if run2() == False:
        print "\nSorry, you took too long."
EDIT: this probably works on Unix-type OSes only.