SIGHUP signal raises an exception with pika in Python 2.7 - python-2.7

I have a pika consumer. When I run it and send it a SIGHUP signal, it raises an exception.
Consumertest.py
import signal
import traceback
import pika
from time import sleep

received_signal = False

def signal_handler(signal, frame):
    global received_signal
    received_signal = True
    exit(1)

def sighup_handler(signal, frame):
    print "sighup received"

signal.signal(signal.SIGHUP, sighup_handler)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)

mq_server = "localhost"
mq_exchange = "my_exchange"
my_queue = "test_queue"

try:
    mq_connection = pika.BlockingConnection(pika.ConnectionParameters(mq_server))
except:
    exit(1)

mq_channel = mq_connection.channel()
mq_channel.exchange_declare(exchange=mq_exchange, durable=True)
mq_channel.queue_declare(queue=my_queue, durable=True)
mq_channel.queue_bind(my_queue, mq_exchange)

def callback(ch, method, properties, body):
    try:
        sleep(10)
        ch.basic_reject(delivery_tag=method.delivery_tag)
    except Exception as e:
        traceback.print_exc()

# region Message consumption
try:
    print ' [*] Waiting for messages. To exit press CTRL+C'
    mq_channel.basic_consume(callback, queue=my_queue)
    mq_channel.start_consuming()
except Exception as e:
    traceback.print_exc()

while True:
    pass
The exception:
 [*] Waiting for messages. To exit press CTRL+C
sighup received
Traceback (most recent call last):
  File "/run/media/bluto/04D0CF8ED0CF8500/Email_Projects/new_email_workers/testsigHup.py", line 47, in <module>
    mq_channel.start_consuming()
  File "/usr/lib/python2.7/site-packages/pika/adapters/blocking_connection.py", line 814, in start_consuming
    self.connection.process_data_events()
  File "/usr/lib/python2.7/site-packages/pika/adapters/blocking_connection.py", line 168, in process_data_events
    if self._handle_read():
  File "/usr/lib/python2.7/site-packages/pika/adapters/blocking_connection.py", line 271, in _handle_read
    if self._read_poller.ready():
  File "/usr/lib/python2.7/site-packages/pika/adapters/blocking_connection.py", line 54, in ready
    events = self.poller.poll(self.poll_timeout)
error: (4, 'Interrupted system call')
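Errno 4 is EINTR: the SIGHUP handler interrupts the poll() system call that BlockingConnection is blocked in, and this pika version does not retry the interrupted call itself. Upgrading pika may resolve this, since later releases retry on EINTR; otherwise, a minimal workaround sketch, assuming the error really is raised as select.error from poll(), as the traceback above suggests:

import errno
import select

while True:
    try:
        mq_channel.start_consuming()
        break  # consuming finished normally
    except select.error as e:
        # poll() was interrupted by a signal (e.g. SIGHUP): retry,
        # but re-raise anything that is not EINTR
        if e.args[0] != errno.EINTR:
            raise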

Related

Scrapy error when run with Celery and Django Channels is installed: OSError: [Errno 9] Bad file descriptor

When I run this script with the Django shell it runs without error, but when I run it with a celery worker it gets this error: OSError: [Errno 9] Bad file descriptor. If I remove channels from INSTALLED_APPS the problem is solved and the task runs with the celery worker without error. Running the same script with a python Process (due to a ReactorNotRestartable exception) in the Django shell gives the same error the celery worker had: OSError: [Errno 9] Bad file descriptor.
I think there's a conflict between Django Channels & Scrapy when the spider is executed in another process.
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

def run_spider(spider):
    settings = get_project_settings()
    crawler_process = CrawlerProcess(settings=settings)
    crawler_process.crawl(spider)
    crawler_process.start()
Exception stack trace when run with a celery worker:
Task run spider raised unexpected: OSError(9, 'Bad file descriptor')
Traceback (most recent call last):
  File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/asyncio/selector_events.py", line 256, in _add_reader
    key = self._selector.get_key(fd)
  File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/selectors.py", line 192, in get_key
    raise KeyError("{!r} is not registered".format(fileobj)) from None
KeyError: '9 is not registered'
Exception stack trace when run with a python Process in the Django shell:
/usr/local/lib/python3.7/site-packages/billiard/pool.py in apply(self, func, args, kwds)
   1387         '''
   1388         if self._state == RUN:
-> 1389             return self.apply_async(func, args, kwds).get()
   1390
   1391     def starmap(self, func, iterable, chunksize=None):
/usr/local/lib/python3.7/site-packages/billiard/pool.py in get(self, timeout)
   1787             return self._value
   1788         else:
-> 1789             raise self._value.exception
   1790
   1791     def safe_apply_callback(self, fun, *args, **kwargs):
OSError: [Errno 9] Bad file descriptor
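No fix is shown above, but one workaround often suggested for event-loop and file-descriptor conflicts between Celery/Channels and Twisted is to run the crawl in a completely separate interpreter. A sketch of that workaround, where spider_name is a hypothetical placeholder for your spider's name (not from the original post):

import subprocess

def run_spider(spider_name):
    # Launch the crawl via the scrapy CLI in a fresh process, so it
    # shares no event loop or file descriptors with the celery worker.
    # Must be invoked from within the scrapy project directory.
    subprocess.check_call(["scrapy", "crawl", spider_name])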

Using multiprocessing.Pool with exception handling

from multiprocessing import Pool

def f(arg):
    if arg == 1:
        raise Exception("exception")
    return "hello %s" % arg

p = Pool(4)
res = p.map_async(f, (1, 2, 3, 4))
p.close()
p.join()
res.get()
Consider this contrived example where I am creating a process pool of 4 workers and assigning work in f(). My question was:
How can I retrieve the successful work that was done for arguments 2, 3, 4 (and at the same time handle the exception for argument 1)?
As is, the code just gives me:
Traceback (most recent call last):
  File "process_test.py", line 13, in <module>
    res.get()
  File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/multiprocessing/pool.py", line 567, in get
    raise self._value
Exception: exception
You can also just do the error handling in the work function:
def do_work(x):
    try:
        return (None, something_with(x))
    except Exception as e:
        return (e, None)

output = Pool(n).map(do_work, input)
for exc, result in output:
    if exc:
        handle_exc(exc)
    else:
        handle_result(result)
You can use the imap function:
iterator = p.imap(f, (1, 2, 3, 4, 5))
while True:
    try:
        print next(iterator)
    except StopIteration:
        break
    except Exception as error:
        print error
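For reference, a self-contained sketch of the imap approach applied to the question's f(), assuming Python 2.7 as in the rest of this page; each failed item's exception is re-raised by next(), and iteration then continues with the remaining results:

from multiprocessing import Pool

def f(arg):
    if arg == 1:
        raise Exception("exception")
    return "hello %s" % arg

if __name__ == '__main__':
    p = Pool(4)
    iterator = p.imap(f, (1, 2, 3, 4))
    while True:
        try:
            print next(iterator)    # successful results print normally
        except StopIteration:
            break
        except Exception as error:
            print error             # the failure for arg == 1 lands here
    p.close()
    p.join()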

Error on the server side of pygame video streaming

I am new to pygame and trying to stream video over pygame.
My client side works fine, but on the sender side I get an error, and the sender and client do not connect.
My sender code is:
import socket, os
from PIL import *
import pygame, sys
import pygame.camera
from pygame.locals import *

# Create server:
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(("10.3.100.207", 5000))
server.listen(5)

# Start Pygame
pygame.init()
pygame.camera.init()
screen = pygame.display.set_mode((320, 240))
cam = pygame.camera.Camera("/dev/video0", (320, 240), "RGB")
cam.start()

# Send data
while True:
    s, add = server.accept()
    print "Connected from", add
    image = cam.get_image()
    screen.blit(image, (0, 0))
    data = cam.get_raw()
    s.sendall(data)
    pygame.display.update()
    # Interrupt
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
But when I run this code I get the following error:
siplab@siplab-OptiPlex-9020:~$ python tut1.py
Traceback (most recent call last):
  File "tut1.py", line 9, in <module>
    server.bind(("10.3.100.207",5000))
  File "/usr/lib/python2.7/socket.py", line 224, in meth
    return getattr(self._sock,name)(*args)
socket.error: [Errno 99] Cannot assign requested address
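No answer is included here, but errno 99 (EADDRNOTAVAIL) from bind() means the address 10.3.100.207 is not assigned to any network interface on the machine running the script. A minimal sketch of the usual fix, binding to the machine's actual address or to all interfaces:

# Bind to all local interfaces ("" is equivalent to "0.0.0.0"),
# so the hard-coded IP does not have to match this machine:
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(("", 5000))
server.listen(5)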

Get user input while listening for connections to server using twisted python

I am currently working on a project using Twisted Python; my problem, specifically, is my attempt to gather user input while also listening for connections using listenTCP(). I looked up the problem and found that stdio.StandardIO seems the most efficient way of doing so, since I am already using Twisted. I have also seen the code examples on Twisted Matrix, stdin.py and stdiodemo.py, however I am struggling with how to apply the example code to my specific problem, given that I need to read from the socket and also gather user input while performing TCP tasks.
The project I am working on is much larger, but the small example code below illustrates what I am trying to do and isolates the problem I am having. Any help in solving my problem is really appreciated.
Server.py
from twisted.internet.protocol import Factory
from twisted.protocols import basic
from twisted.internet import reactor, protocol, stdio
from Tkinter import *
import os, sys

class ServerProtocol(protocol.Protocol):
    def __init__(self, factory):
        self.factory = factory
        stdio.StandardIO(self)

    def connectionMade(self):
        self.factory.numConnections += 1
        self.factory.clients.append(self)

    def dataReceived(self, data):
        try:
            print 'receiving data'
            print data
        except Exception, e:
            print e

    def connectionLost(self, reason):
        self.factory.numConnections -= 1
        self.factory.clients.remove(self)

class ServerFactory(Factory):
    numConnections = 0

    def buildProtocol(self, addr):
        return ServerProtocol(self)

class StdioCommandLine(basic.LineReceiver):
    from os import linesep as delimiter

    def connectionMade(self):
        self.transport.write('>>> ')

    def lineReceived(self, line):
        self.sendLine('Echo: ' + line)
        self.transport.write('>>> ')

reactor.listenTCP(9001, ServerFactory())
stdio.StandardIO(StdioCommandLine())
reactor.run()
Client.py
from twisted.internet import reactor, protocol
import os, time, sys
import argparse

class MessageClientProtocol(protocol.Protocol):
    def __init__(self, factory):
        self.factory = factory

    def connectionMade(self):
        self.sendMessage()

    def sendMessage(self):
        print 'sending message'
        try:
            self.transport.write('hello world')
        except Exception as e:
            print e

    def dataReceived(self, data):
        print 'received: ', data
        self.sendMessage()

class MessageClientFactory(protocol.ClientFactory):
    def __init__(self, message):
        self.message = message

    def buildProtocol(self, addr):
        return MessageClientProtocol(self)

    def clientConnectionFailed(self, connector, reason):
        print 'Connection Failed: ', reason.getErrorMessage()
        reactor.stop()

    def clientConnectionLost(self, connector, reason):
        print 'Connection Lost: ', reason.getErrorMessage()

reactor.connectTCP('192.168.1.70', 9001, MessageClientFactory('hello world - client'))
reactor.run()
At the moment the above code returns an Unhandled Error, as follows. This shows me using stdin, the data being echoed back to stdout, and then a client connecting and causing the error:
python Server.py
>>> hello
Echo: hello
>>> Unhandled Error
Traceback (most recent call last):
  File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/python/log.py", line 84, in callWithContext
    return context.call({ILogContext: newCtx}, func, *args, **kw)
  File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/python/context.py", line 118, in callWithContext
    return self.currentContext().callWithContext(ctx, func, *args, **kw)
  File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/python/context.py", line 81, in callWithContext
    return func(*args,**kw)
  File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/internet/selectreactor.py", line 149, in _doReadOrWrite
    why = getattr(selectable, method)()
--- <exception caught here> ---
  File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/internet/tcp.py", line 1067, in doRead
    protocol = s
The traceback you provided seems to be cut off. I tried to run the code on my machine and it shows this traceback:
Traceback (most recent call last):
  File "/usr/lib/python2.7/site-packages/twisted/python/log.py", line 84, in callWithContext
    return context.call({ILogContext: newCtx}, func, *args, **kw)
  File "/usr/lib/python2.7/site-packages/twisted/python/context.py", line 118, in callWithContext
    return self.currentContext().callWithContext(ctx, func, *args, **kw)
  File "/usr/lib/python2.7/site-packages/twisted/python/context.py", line 81, in callWithContext
    return func(*args,**kw)
  File "/usr/lib/python2.7/site-packages/twisted/internet/posixbase.py", line 597, in _doReadOrWrite
    why = selectable.doRead()
--- <exception caught here> ---
  File "/usr/lib/python2.7/site-packages/twisted/internet/tcp.py", line 1067, in doRead
    protocol = self.factory.buildProtocol(self._buildAddr(addr))
  File "Server.py", line 30, in buildProtocol
    return ServerProtocol(self)
  File "Server.py", line 10, in __init__
    stdio.StandardIO(self)
  File "/usr/lib/python2.7/site-packages/twisted/internet/_posixstdio.py", line 42, in __init__
    self.protocol.makeConnection(self)
  File "/usr/lib/python2.7/site-packages/twisted/internet/protocol.py", line 490, in makeConnection
    self.connectionMade()
  File "Server.py", line 14, in connectionMade
    self.factory.clients.append(self)
exceptions.AttributeError: ServerFactory instance has no attribute 'clients'
As can easily be seen with the full traceback, the factory is missing the clients attribute. This can be fixed, e.g., by adding this to your ServerFactory class:
def __init__(self):
    self.clients = []
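For clarity, a minimal sketch of the factory with that fix applied (nothing else in the class changes):

class ServerFactory(Factory):
    numConnections = 0

    def __init__(self):
        # the list the protocols append themselves to in connectionMade
        self.clients = []

    def buildProtocol(self, addr):
        return ServerProtocol(self)

Note also that, as the traceback shows, connectionMade fires from the stdio.StandardIO(self) call in ServerProtocol.__init__, so every new TCP protocol is additionally wrapped around stdin; if that is unintended, keeping stdin handling solely in the separate StdioCommandLine protocol (as the script already does at the bottom) and dropping that line may be worth considering.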

Stop a Python 2.7 daemon with a signal

I use Python 2.7.3 and daemon runner in my script. In the run (loop) method I want to sleep for some time, but not with code such as:
while True:
    time.sleep(10)
Instead, I want to wait on a synchronization primitive, for example multiprocessing.Event. Here is my code:
# -*- coding: utf-8 -*-
import logging
from daemon import runner
import signal
import multiprocessing
import spyder_cfg

logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M', filename=spyder_cfg.log_file)

class Daemon(object):
    def __init__(self, pidfile_path):
        self.stdin_path = '/dev/null'
        self.stdout_path = '/dev/tty'
        self.stderr_path = '/dev/tty'
        self.pidfile_path = None
        self.pidfile_timeout = 5
        self.pidfile_path = pidfile_path

    def setup_daemon_context(self, daemon_context):
        self.daemon_context = daemon_context

    def run(self):
        logging.info('Spyder service has started')
        logging.debug('event from the run() = {}'.format(self.daemon_context.stop_event))
        while not self.daemon_context.stop_event.wait(10):
            try:
                logging.info('Spyder is working...')
            except BaseException as exc:
                logging.exception(exc)
        logging.info('Spyder service has been stopped')

    def handle_exit(self, signum, frame):
        try:
            logging.info('Spyder stopping...')
            self.daemon_context.stop_event.set()
        except BaseException as exc:
            logging.exception(exc)

if __name__ == '__main__':
    app = Daemon(spyder_cfg.pid_file)
    d = runner.DaemonRunner(app)
    d.daemon_context.working_directory = spyder_cfg.work_dir
    d.daemon_context.files_preserve = [h.stream for h in logging.root.handlers]
    d.daemon_context.signal_map = {signal.SIGUSR1: app.handle_exit}
    d.daemon_context.stop_event = multiprocessing.Event()
    app.setup_daemon_context(d.daemon_context)
    logging.debug('event from the main = {}'.format(d.daemon_context.stop_event))
    d.do_action()
These are my log file records:
06-04 11:32 root DEBUG event from the main = <multiprocessing.synchronize.Event object at 0x7f0ef0930d50>
06-04 11:32 root INFO Spyder service has started
06-04 11:32 root DEBUG event from the run() = <multiprocessing.synchronize.Event object at 0x7f0ef0930d50>
06-04 11:32 root INFO Spyder is working...
06-04 11:32 root INFO Spyder stopping...
There is no 'Spyder service has been stopped' entry in the log; my program hangs on the set() call. While debugging I saw that it hangs inside Event.set(): the set method blocks on a semaphore while waking up all the waiting entities. It makes no difference whether the Event is a global object or a threading.Event. I saw this one answer, but it's not good for me. Is there an alternative to wait with a timeout that has the same behavior as multiprocessing.Event?
I printed the stack from the signal handler and I think there is a deadlock, because the signal handler uses the same stack as my main process, and when I call Event.set(), the wait() method is higher on the stack...
def handle_exit(self, signum, frame):
    try:
        logging.debug('Signal handler:{}'.format(traceback.print_stack()))
    except BaseException as exc:
        logging.exception(exc)
    d.do_action()
  File ".../venv/work/local/lib/python2.7/site-packages/daemon/runner.py", line 189, in do_action
    func(self)
  File ".../venv/work/local/lib/python2.7/site-packages/daemon/runner.py", line 134, in _start
    self.app.run()
  File ".../venv/work/skelet/src/spyder.py", line 32, in run
    while not self.daemon_context.stop_event.wait(10):
  File "/usr/lib/python2.7/multiprocessing/synchronize.py", line 337, in wait
    self._cond.wait(timeout)
  File "/usr/lib/python2.7/multiprocessing/synchronize.py", line 246, in wait
    self._wait_semaphore.acquire(True, timeout)
  File ".../venv/work/skelet/src/spyder.py", line 41, in handle_exit
    logging.debug('Signal handler:{}'.format(traceback.print_stack()))
That is why this fix solves the problem:
from threading import Timer

def handle_exit(self, signum, frame):
    t = Timer(1, self.handle_exit2)
    t.start()

def handle_exit2(self):
    self.daemon_context.stop_event.set()
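Deferring set() to a Timer thread lets the signal handler return immediately, so the interrupted wait() further up the main thread's stack can resume and acknowledge the wake-up; set() then completes on the timer thread without the self-deadlock. A sketch of an equivalent variant without the arbitrary one-second delay, assuming the same daemon_context wiring as above:

import threading

def handle_exit(self, signum, frame):
    # run set() on a plain worker thread; it proceeds as soon as the
    # handler returns and the main thread resumes its wait()
    threading.Thread(target=self.daemon_context.stop_event.set).start()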