I aim to build my own Jarvis, which listens all the time and activates when I say "hello". I learned that the Google Cloud Speech-to-Text API doesn't stream for more than 60 seconds, but then I found this not-so-famous link, where the script listens for an indefinite duration. The author of the GitHub script says he played a trick: the script refreshes the stream before 60 seconds elapse, so the program doesn't crash.
https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/speech/cloud-client/transcribe_streaming_indefinite.py
Following is my modified version, since I wanted it to answer my questions only when they follow "hello", and not respond all the time. Now, if I ask my Jarvis a question whose answer takes more than 60 seconds, the stream doesn't get time to refresh and the program crashes :(
#!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Speech API sample application using the streaming API.
NOTE: This module requires the additional dependency `pyaudio`. To install
using pip:
pip install pyaudio
Example usage:
python transcribe_streaming_indefinite.py
"""
# [START speech_transcribe_infinite_streaming]
from __future__ import division
import time
import re
import sys
import os
from google.cloud import speech
from pygame.mixer import *
from googletrans import Translator
# running=True
translator = Translator()
init()
import pyaudio
from six.moves import queue
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "C:\\Users\\mnauf\\Desktop\\rehandevice\\key.json"
from commands2 import commander
cmd=commander()
# Audio recording parameters
STREAMING_LIMIT = 55000
SAMPLE_RATE = 16000
CHUNK_SIZE = int(SAMPLE_RATE / 10) # 100ms
def get_current_time():
return int(round(time.time() * 1000))
def duration_to_secs(duration):
return duration.seconds + (duration.nanos / float(1e9))
class ResumableMicrophoneStream:
"""Opens a recording stream as a generator yielding the audio chunks."""
def __init__(self, rate, chunk_size):
self._rate = rate
self._chunk_size = chunk_size
self._num_channels = 1
self._max_replay_secs = 5
# Create a thread-safe buffer of audio data
self._buff = queue.Queue()
self.closed = True
self.start_time = get_current_time()
# 2 bytes in 16 bit samples
self._bytes_per_sample = 2 * self._num_channels
self._bytes_per_second = self._rate * self._bytes_per_sample
self._bytes_per_chunk = (self._chunk_size * self._bytes_per_sample)
self._chunks_per_second = (
self._bytes_per_second // self._bytes_per_chunk)
def __enter__(self):
self.closed = False
self._audio_interface = pyaudio.PyAudio()
self._audio_stream = self._audio_interface.open(
format=pyaudio.paInt16,
channels=self._num_channels,
rate=self._rate,
input=True,
frames_per_buffer=self._chunk_size,
# Run the audio stream asynchronously to fill the buffer object.
# This is necessary so that the input device's buffer doesn't
# overflow while the calling thread makes network requests, etc.
stream_callback=self._fill_buffer,
)
return self
def __exit__(self, type, value, traceback):
self._audio_stream.stop_stream()
self._audio_stream.close()
self.closed = True
# Signal the generator to terminate so that the client's
# streaming_recognize method will not block the process termination.
self._buff.put(None)
self._audio_interface.terminate()
def _fill_buffer(self, in_data, *args, **kwargs):
"""Continuously collect data from the audio stream, into the buffer."""
self._buff.put(in_data)
return None, pyaudio.paContinue
def generator(self):
while not self.closed:
if get_current_time() - self.start_time > STREAMING_LIMIT:
self.start_time = get_current_time()
break
# Use a blocking get() to ensure there's at least one chunk of
# data, and stop iteration if the chunk is None, indicating the
# end of the audio stream.
chunk = self._buff.get()
if chunk is None:
return
data = [chunk]
# Now consume whatever other data's still buffered.
while True:
try:
chunk = self._buff.get(block=False)
if chunk is None:
return
data.append(chunk)
except queue.Empty:
break
yield b''.join(data)
def search(responses, stream, code):
responses = (r for r in responses if (
r.results and r.results[0].alternatives))
num_chars_printed = 0
for response in responses:
if not response.results:
continue
# The `results` list is consecutive. For streaming, we only care about
# the first result being considered, since once it's `is_final`, it
# moves on to considering the next utterance.
result = response.results[0]
if not result.alternatives:
continue
# Display the transcription of the top alternative.
top_alternative = result.alternatives[0]
transcript = top_alternative.transcript
# music.load("/home/pi/Desktop/rehandevice/end.mp3")
# music.play()
# Display interim results, but with a carriage return at the end of the
# line, so subsequent lines will overwrite them.
# If the previous result was longer than this one, we need to print
# some extra spaces to overwrite the previous result
overwrite_chars = ' ' * (num_chars_printed - len(transcript))
if not result.is_final:
sys.stdout.write(transcript + overwrite_chars + '\r')
sys.stdout.flush()
num_chars_printed = len(transcript)
else:
#print(transcript + overwrite_chars)
# Exit recognition if any of the transcribed phrases could be
# one of our keywords.
if code=='ur-PK':
transcript=translator.translate(transcript).text
print("Your command: ", transcript + overwrite_chars)
if "hindi assistant" in (transcript+overwrite_chars).lower():
cmd.respond("Alright. Talk to me in urdu",code=code)
main('ur-PK')
elif "english assistant" in (transcript+overwrite_chars).lower():
cmd.respond("Alright. Talk to me in English",code=code)
main('en-US')
cmd.discover(text=transcript + overwrite_chars,code=code)
for i in range(10):
print("Hello world")
break
num_chars_printed = 0
def listen_print_loop(responses, stream, code):
"""Iterates through server responses and prints them.
The responses passed is a generator that will block until a response
is provided by the server.
Each response may contain multiple results, and each result may contain
multiple alternatives; for details, see https://cloud.google.com/speech-to-text/docs/reference/rpc/google.cloud.speech.v1#streamingrecognizeresponse. Here we
print only the transcription for the top alternative of the top result.
In this case, responses are provided for interim results as well. If the
response is an interim one, print a line feed at the end of it, to allow
the next result to overwrite it, until the response is a final one. For the
final one, print a newline to preserve the finalized transcription.
"""
responses = (r for r in responses if (
r.results and r.results[0].alternatives))
music.load(r"C:\\Users\\mnauf\\Desktop\\rehandevice\\coins.mp3")
num_chars_printed = 0
for response in responses:
if not response.results:
continue
# The `results` list is consecutive. For streaming, we only care about
# the first result being considered, since once it's `is_final`, it
# moves on to considering the next utterance.
result = response.results[0]
if not result.alternatives:
continue
# Display the transcription of the top alternative.
top_alternative = result.alternatives[0]
transcript = top_alternative.transcript
# Display interim results, but with a carriage return at the end of the
# line, so subsequent lines will overwrite them.
#
# If the previous result was longer than this one, we need to print
# some extra spaces to overwrite the previous result
overwrite_chars = ' ' * (num_chars_printed - len(transcript))
if not result.is_final:
sys.stdout.write(transcript + overwrite_chars + '\r')
sys.stdout.flush()
num_chars_printed = len(transcript)
else:
print("Listen print loop", transcript + overwrite_chars)
# Exit recognition if any of the transcribed phrases could be
# one of our keywords.
if re.search(r'\b(hello)\b', transcript.lower(), re.I):
#print("Give me order")
music.play()
search(responses, stream,code)
break
elif re.search(r'\b(ہیلو)\b', transcript, re.I):
music.play()
search(responses, stream,code)
break
num_chars_printed = 0
def main(code):
cmd.respond("I am Rayhaan dot A Eye. How can I help you?",code=code)
client = speech.SpeechClient()
config = speech.types.RecognitionConfig(
encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=SAMPLE_RATE,
        language_code=code,
max_alternatives=1,
enable_word_time_offsets=True)
streaming_config = speech.types.StreamingRecognitionConfig(
config=config,
interim_results=True)
mic_manager = ResumableMicrophoneStream(SAMPLE_RATE, CHUNK_SIZE)
print('Say "Quit" or "Exit" to terminate the program.')
with mic_manager as stream:
while not stream.closed:
audio_generator = stream.generator()
requests = (speech.types.StreamingRecognizeRequest(
audio_content=content)
for content in audio_generator)
responses = client.streaming_recognize(streaming_config,
requests)
# Now, put the transcription responses to use.
try:
listen_print_loop(responses, stream, code)
            except:
                pass  # the original had a bare name ("listen") here, which raises NameError
if __name__ == '__main__':
main('en-US')
# [END speech_transcribe_infinite_streaming]
You can call your functions after recognition in a different thread. Example:
from threading import Thread

new_thread = Thread(target=music.play)
new_thread.daemon = True # Not always needed, read more about daemon property
new_thread.start()
Or, if you just want to prevent the exception, you can always use try/except. Example:
with mic_manager as stream:
while not stream.closed:
try:
audio_generator = stream.generator()
requests = (speech.types.StreamingRecognizeRequest(
audio_content=content)
for content in audio_generator)
responses = client.streaming_recognize(streaming_config,
requests)
# Now, put the transcription responses to use.
listen_print_loop(responses, stream, code)
except BaseException as e:
print("Exception occurred - {}".format(str(e)))
I was testing streaming processing with Google Cloud Pub/Sub:
forward messages from a publisher to a topic, read them in an Apache Beam pipeline, and check them with beam.Map(print).
Reading the messages from Pub/Sub worked, but an error occurred after all the messages had been read.
1. This code delivers messages from the publisher to the topic
from google.cloud import pubsub_v1
from google.cloud import bigquery
import time
# TODO(developer)
project_id = "your-project-id"
topic_id = "your-topic-id"
# Construct a BigQuery client object.
client = bigquery.Client()
# Configure the batch to publish as soon as there is ten messages,
# one kilobyte of data, or one second has passed.
batch_settings = pubsub_v1.types.BatchSettings(
max_messages=10, # default 100
max_bytes=1024, # default 1 MB
    max_latency=1,  # default 10 ms
)
publisher = pubsub_v1.PublisherClient(batch_settings)
topic_path = publisher.topic_path(project_id, topic_id)
query = """
SELECT *
FROM `[bigquery-schema.bigquery-dataset.bigquery-tablename]`
LIMIT 20
"""
query_job = client.query(query)
# Resolve the publish future in a separate thread.
def callback(topic_message):
message_id = topic_message.result()
print(message_id)
print("The query data:")
for row in query_job:
data = u"category={}, language={}, count={}".format(row[0], row[1], row[2])
print(data)
data = data.encode("utf-8")
time.sleep(1)
topic_message = publisher.publish(topic_path, data=data)
topic_message.add_done_callback(callback)
print("Published messages with batch settings.")
2. Apache Beam code for reading and processing data from Pub/Sub
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START pubsub_to_gcs]
import argparse
import datetime
import json
import logging
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
import apache_beam.transforms.window as window
pipeline_options = PipelineOptions(
streaming=True,
save_main_session=True,
runner='DirectRunner',
return_immediately=True,
initial_rpc_timeout_millis=25000,
)
class GroupWindowsIntoBatches(beam.PTransform):
"""A composite transform that groups Pub/Sub messages based on publish
time and outputs a list of dictionaries, where each contains one message
and its publish timestamp.
"""
def __init__(self, window_size):
# Convert minutes into seconds.
self.window_size = int(window_size * 60)
def expand(self, pcoll):
return (
pcoll
# Assigns window info to each Pub/Sub message based on its
# publish timestamp.
| "Window into Fixed Intervals"
>> beam.WindowInto(window.FixedWindows(self.window_size))
| "Add timestamps to messages" >> beam.ParDo(AddTimestamps())
# Use a dummy key to group the elements in the same window.
# Note that all the elements in one window must fit into memory
# for this. If the windowed elements do not fit into memory,
# please consider using `beam.util.BatchElements`.
# https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.util.html#apache_beam.transforms.util.BatchElements
| "Add Dummy Key" >> beam.Map(lambda elem: (None, elem))
| "Groupby" >> beam.GroupByKey()
| "Abandon Dummy Key" >> beam.MapTuple(lambda _, val: val)
)
class AddTimestamps(beam.DoFn):
def process(self, element, publish_time=beam.DoFn.TimestampParam):
"""Processes each incoming windowed element by extracting the Pub/Sub
message and its publish timestamp into a dictionary. `publish_time`
defaults to the publish timestamp returned by the Pub/Sub server. It
is bound to each element by Beam at runtime.
"""
yield {
"message_body": element.decode("utf-8"),
"publish_time": datetime.datetime.utcfromtimestamp(
float(publish_time)
).strftime("%Y-%m-%d %H:%M:%S.%f"),
}
class WriteBatchesToGCS(beam.DoFn):
def __init__(self, output_path):
self.output_path = output_path
def process(self, batch, window=beam.DoFn.WindowParam):
"""Write one batch per file to a Google Cloud Storage bucket. """
ts_format = "%H:%M"
window_start = window.start.to_utc_datetime().strftime(ts_format)
window_end = window.end.to_utc_datetime().strftime(ts_format)
filename = "-".join([self.output_path, window_start, window_end])
with beam.io.gcp.gcsio.GcsIO().open(filename=filename, mode="w") as f:
for element in batch:
f.write("{}\n".format(json.dumps(element)).encode("utf-8"))
class test_func(beam.DoFn) :
def __init__(self, delimiter=','):
self.delimiter = delimiter
def process(self, topic_message):
print(topic_message)
def run(input_topic, output_path, window_size=1.0, pipeline_args=None):
# `save_main_session` is set to true because some DoFn's rely on
# globally imported modules.
pipeline_options = PipelineOptions(
pipeline_args, streaming=True, save_main_session=True
)
with beam.Pipeline(options=pipeline_options) as pipeline:
(
pipeline
| "Read PubSub Messages"
>> beam.io.ReadFromPubSub(topic=input_topic)
| "Pardo" >> beam.ParDo(test_func(','))
)
if __name__ == "__main__": # noqa
input_topic = 'projects/[project-id]/topics/[pub/sub-name]'
output_path = 'gs://[bucket-name]/[file-directory]'
run(input_topic, output_path, 2)
# [END pubsub_to_gcs]
As a temporary measure I set return_immediately=True, but this is not a fundamental solution either.
Thank you for reading.
This seems to be a known issue of the Pub/Sub libraries, reported in another SO thread; it looks like it was recently addressed in version 1.4.2, but that release is not yet included in the Beam dependencies, which still pin google-cloud-pubsub>=0.39.0,<1.1.0.
I did some research and found that DataflowRunner appears to handle this error better than DirectRunner, which is maintained by the Apache Beam team. The issue has already been reported on the Beam site, and it is not resolved yet.
Also, please be advised that the troubleshooting guide for the DEADLINE_EXCEEDED error can be found here. You can check whether any of the advice presented there helps in your case, such as upgrading to the latest version of the client library.
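For reference, switching the runner is just a pipeline-option change; a minimal sketch (the project, region, and bucket values are placeholders):
from apache_beam.options.pipeline_options import PipelineOptions

# DataflowRunner reportedly tolerates these DEADLINE_EXCEEDED retries better
# than DirectRunner; project, region and temp_location are placeholders.
pipeline_options = PipelineOptions(
    streaming=True,
    save_main_session=True,
    runner='DataflowRunner',
    project='your-project-id',
    region='us-central1',
    temp_location='gs://your-bucket/tmp',
)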
I'd like to run two functions in parallel: one that batches images (streaming all 25 images for processing) and another that processes the batched images. They need to run in parallel.
So I have a main function for batching images, BatchStreaming(self), and one for processing, BatchProcessing(self, b_num). BatchStreaming is working well. After streaming 25 images, it needs to hand off to batch processing. So there are two parallel activities:
(1) The while loop in BatchStreaming needs to continue with the next batch of images.
(2) At the same time, the current batch of images needs to be processed.
I am confused about whether I should use a process or a thread. I prefer a process, as I'd like to utilize all the CPU cores (because of the GIL, Python threads run on only one core at a time).
That leaves me with two issues:
(1) The process has to join back to the main program before it proceeds, but I need to continue with the next batch of images.
(2) In the following program, when BatchProcessing(self, b_num) is called, I get this exception:
Caught Main Exception
(<class 'TypeError'>, TypeError("'module' object is not callable",), <traceback object at 0x7f98635dcfc8>)
What could the issue be?
The code is as follows.
import multiprocessing as MultiProcess
import time
import vid_streamv3 as vs
import cv2
import sys
import numpy as np
import os
BATCHSIZE=25
CHANNEL=3
HEIGHT=480
WIDTH=640
ORGHEIGHT=1080
ORGWIDTH=1920
class ProcessPipeline:
def __init__(self):
#Current Cam
self.camProcess = None
self.cam_queue = MultiProcess.Queue(maxsize=100)
self.stopbit = None
self.camlink = 'rtsp://root:pass#192.168.0.90/axis-media/media.amp?camera=1' #Add your RTSP cam link
self.framerate = 25
self.fullsize_batch1=np.zeros((BATCHSIZE, ORGHEIGHT, ORGWIDTH, CHANNEL), dtype=np.uint8)
self.fullsize_batch2=np.zeros((BATCHSIZE, ORGHEIGHT, ORGWIDTH, CHANNEL), dtype=np.uint8)
self.batch1_is_processed=False
def BatchStreaming(self):
#get all cams
time.sleep(3)
self.stopbit = MultiProcess.Event()
self.camProcess = vs.StreamCapture(self.camlink,
self.stopbit,
self.cam_queue,
self.framerate)
self.camProcess.start()
count=0
try:
while True:
if not self.cam_queue.empty():
cmd, val = self.cam_queue.get()
if cmd == vs.StreamCommands.FRAME:
if val is not None:
print('streaming starts ')
if(self.batch1_is_processed == False):
self.fullsize_batch1[count]=val
else:
self.fullsize_batch2[count]=val
count=count+1
if(count>=25):
if(self.batch1_is_processed == False):#to start process for inference and post processing for batch 1
self.batch1_is_processed = True
print('batch 1 process')
p = MultiProcess(target=self.BatchProcessing, args=(1,))
else:#to start process for inference and post processing for batch 2
self.batch1_is_processed = False
print('batch 2 process')
p = MultiProcess(target=self.BatchProcessing, args=(2,))
p.start()
print('BatchProcessing start')
p.join()
print('BatchProcessing join')
count=0
cv2.imshow('Cam: ' + self.camlink, val)
cv2.waitKey(1)
except KeyboardInterrupt:
print('Caught Keyboard interrupt')
except:
e = sys.exc_info()
print('Caught Main Exception')
print(e)
self.StopStreaming()
cv2.destroyAllWindows()
def StopStreaming(self):
print('in stopCamStream')
if self.stopbit is not None:
self.stopbit.set()
while not self.cam_queue.empty():
try:
_ = self.cam_queue.get()
except:
break
self.cam_queue.close()
print("before camProcess.join()")
self.camProcess.join()
print("after camProcess.join()")
def BatchProcessing(self, b_num):
print('module name:', __name__)
if hasattr(os, 'getppid'): # only available on Unix
print('parent process:', os.getppid())
print('process id:', os.getpid())
if __name__ == "__main__":
mc = ProcessPipeline()
mc.BatchStreaming()
I used Event signalling, as shown below.
That is more straightforward for my application:
when the batching loop has enough images, it signals the batch-processing side.
#event_tut.py
import random, time
from threading import Event, Thread
event = Event()
def waiter(event, nloops):
    count = 0
    while count < nloops:
        print("%s. Waiting for the flag to be set." % (count + 1))
        event.wait()  # Blocks until the flag becomes true.
        print("Wait complete at:", time.ctime())
        event.clear()  # Resets the flag.
        print('wait exit')
        count = count + 1
def setter(event, nloops):
for i in range(nloops):
time.sleep(random.randrange(2, 5)) # Sleeps for some time.
event.set()
threads = []
nloops = 10
threads.append(Thread(target=waiter, args=(event, nloops)))
threads[-1].start()
threads.append(Thread(target=setter, args=(event, nloops)))
threads[-1].start()
for thread in threads:
thread.join()
print("All done.")
I am writing a program in Python to run on my Raspberry Pi. As many people know, a Raspberry Pi can receive input in many ways. I am using a keyboard and another external input source. This is just for context, and not really important for the question itself.
In my program, I wait for keyboard input, and if there is none within a short period of time, I skip it and look for input from the other source. To do this I am using the following code:
import sys
import time
from select import select
timeout = 4
prompt = "Type any number from 0 up to 9"
default = 99
def input_with(prompt, timeout, default):
"""Read an input from the user or timeout"""
print prompt,
sys.stdout.flush()
rlist, _, _ = select([sys.stdin], [], [], timeout)
if rlist:
s = int(sys.stdin.read().replace('\n',''))
else:
s = default
print s
return s
I am going to run the Raspberry Pi without a full keyboard, which means I won't have a Return key, so it will be impossible to validate the keyboard input this way.
My question is whether it is possible to get the user's input without them pressing Enter, while keeping the timeout on the input.
I've seen many topics covering each issue separately (a timeout, or input without pressing Return), but nothing with both together.
Thanks in advance for any help!
I don't think it is straightforward to do it exactly the way you want, i.e. to read the waiting contents of the line even if Enter hasn't been pressed (is that right?).
The best suggestion I can offer is to capture each character as it is pressed, and stop once the time has passed. You can capture input on a per-character basis by putting the terminal into cbreak mode with tty.setcbreak().
import sys
from select import select
import tty
import termios
try:
# more correct to use monotonic time where available ...
    from time import clock_gettime, CLOCK_MONOTONIC
    def time(): return clock_gettime(CLOCK_MONOTONIC)
except ImportError:
# ... but plain old 'time' may be good enough if not.
from time import time
timeout = 4
prompt = "Type any number from 0 up to 9"
default = 99
def input_with(prompt, timeout, default):
"""Read an input from the user or timeout"""
print prompt,
sys.stdout.flush()
# store terminal settings
old_settings = termios.tcgetattr(sys.stdin)
buff = ''
try:
tty.setcbreak(sys.stdin) # flush per-character
break_time = time() + timeout
while True:
rlist, _, _ = select([sys.stdin], [], [], break_time - time())
if rlist:
c = sys.stdin.read(1)
# swallow CR (in case running on Windows)
if c == '\r':
continue
# newline EOL or EOF are also end of input
            if c in ('\n', '', '\x04'):
break # newline is also end of input
buff += c
sys.stdout.write(c) # echo back
sys.stdout.flush()
else: # must have timed out
break
finally:
# put terminal back the way it was
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)
if buff:
return int(buff.replace('\n','').strip())
else:
sys.stdout.write('%d' % default)
return default
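Usage mirrors the original function; a quick sketch:
if __name__ == '__main__':
    value = input_with(prompt, timeout, default)
    print '\nYou entered:', value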
I wrote a script to upload a video to YouTube using the YouTube Data API v3 in Python, with the help of the given example code.
And I wrote another script to add the uploaded video to a playlist using the same YouTube Data API v3, which can be seen here.
After that I wrote a single script that both uploads the video and adds it to a playlist. In it I took care of authentication and scopes, but I am still getting a permission error. Here is my new script:
#!/usr/bin/python
import httplib
import httplib2
import os
import random
import sys
import time
from apiclient.discovery import build
from apiclient.errors import HttpError
from apiclient.http import MediaFileUpload
from oauth2client.file import Storage
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run
# Explicitly tell the underlying HTTP transport library not to retry, since
# we are handling retry logic ourselves.
httplib2.RETRIES = 1
# Maximum number of times to retry before giving up.
MAX_RETRIES = 10
# Always retry when these exceptions are raised.
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected,
httplib.IncompleteRead, httplib.ImproperConnectionState,
httplib.CannotSendRequest, httplib.CannotSendHeader,
httplib.ResponseNotReady, httplib.BadStatusLine)
# Always retry when an apiclient.errors.HttpError with one of these status
# codes is raised.
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
CLIENT_SECRETS_FILE = "client_secrets.json"
# A limited OAuth 2 access scope that allows for uploading files, but not other
# types of account access.
YOUTUBE_UPLOAD_SCOPE = "https://www.googleapis.com/auth/youtube.upload"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# Helpful message to display if the CLIENT_SECRETS_FILE is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console
https://code.google.com/apis/console#access
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
CLIENT_SECRETS_FILE))
def get_authenticated_service():
flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=YOUTUBE_UPLOAD_SCOPE,
message=MISSING_CLIENT_SECRETS_MESSAGE)
storage = Storage("%s-oauth2.json" % sys.argv[0])
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(flow, storage)
return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
http=credentials.authorize(httplib2.Http()))
def initialize_upload(title,description,keywords,privacyStatus,file):
youtube = get_authenticated_service()
tags = None
if keywords:
tags = keywords.split(",")
insert_request = youtube.videos().insert(
part="snippet,status",
body=dict(
snippet=dict(
title=title,
description=description,
tags=tags,
categoryId='26'
),
status=dict(
privacyStatus=privacyStatus
)
),
# chunksize=-1 means that the entire file will be uploaded in a single
# HTTP request. (If the upload fails, it will still be retried where it
# left off.) This is usually a best practice, but if you're using Python
# older than 2.6 or if you're running on App Engine, you should set the
# chunksize to something like 1024 * 1024 (1 megabyte).
media_body=MediaFileUpload(file, chunksize=-1, resumable=True)
)
vid=resumable_upload(insert_request)
#Here I added lines to add video to playlist
#add_video_to_playlist(youtube,vid,"PL2JW1S4IMwYubm06iDKfDsmWVB-J8funQ")
#youtube = get_authenticated_service()
add_video_request=youtube.playlistItems().insert(
part="snippet",
body={
'snippet': {
'playlistId': "PL2JW1S4IMwYubm06iDKfDsmWVB-J8funQ",
'resourceId': {
'kind': 'youtube#video',
'videoId': vid
}
#'position': 0
}
}
).execute()
def resumable_upload(insert_request):
response = None
error = None
retry = 0
vid=None
while response is None:
try:
print "Uploading file..."
status, response = insert_request.next_chunk()
if 'id' in response:
print "'%s' (video id: %s) was successfully uploaded." % (
title, response['id'])
vid=response['id']
else:
exit("The upload failed with an unexpected response: %s" % response)
except HttpError, e:
if e.resp.status in RETRIABLE_STATUS_CODES:
error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status,
e.content)
else:
raise
except RETRIABLE_EXCEPTIONS, e:
error = "A retriable error occurred: %s" % e
if error is not None:
print error
retry += 1
if retry > MAX_RETRIES:
exit("No longer attempting to retry.")
max_sleep = 2 ** retry
sleep_seconds = random.random() * max_sleep
print "Sleeping %f seconds and then retrying..." % sleep_seconds
time.sleep(sleep_seconds)
return vid
if __name__ == '__main__':
title="sample title"
description="sample description"
keywords="keyword1,keyword2,keyword3"
privacyStatus="public"
file="myfile.mp4"
vid=initialize_upload(title,description,keywords,privacyStatus,file)
print 'video ID is :',vid
I am not able to figure out what is wrong. I am getting a permission error, yet both scripts work fine independently.
Could anyone help me figure out where I am going wrong, or how to upload a video and add it to a playlist in one run?
I found the answer: the two independent scripts actually use different scopes.
The scope for uploading is "https://www.googleapis.com/auth/youtube.upload".
The scope for adding to a playlist is "https://www.googleapis.com/auth/youtube".
Since the scopes are different, I had to handle authentication separately for each call.
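Alternatively, flow_from_clientsecrets accepts a list of scopes, so a single credential can cover both calls; a minimal sketch of that change (any previously stored *-oauth2.json token would need to be deleted so the flow re-runs with the broader scopes):
# request both scopes in one authorization flow so the same
# credential can upload videos and modify playlists
YOUTUBE_SCOPES = [
    "https://www.googleapis.com/auth/youtube.upload",
    "https://www.googleapis.com/auth/youtube",
]

flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,
                               scope=YOUTUBE_SCOPES,
                               message=MISSING_CLIENT_SECRETS_MESSAGE)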
I am trying to reference a function named process in my settings, but it is giving me this error:
Could not import the name: payusaepay.views.process
Here are my views:
# coding: utf-8
from decimal import Decimal, ROUND_UP, ROUND_DOWN
import re, time, math, random, hashlib
import urlparse, urllib, httplib, urllib2, pycurl
from django.conf import settings
from utils import logger
def trunc(f, n):
'''Truncates/pads a float f to n decimal places without rounding'''
return ('%.*f' % (n + 1, f))[:-1]
class Transaction(object):
"""USAePay Transaction Class"""
VALID_COMMANDS = set('sale credit void preauth postauth check checkcredit'.split())
VALID_REQUEST_KEYS = [
'UMamount',
'UMauthCode', # required if running postauth transaction.
'UMcard', # the entire amount that will be charged to the customers card
'UMexpir', # expiration date 4 digits no /
'UMbillamount',
'UMamount', # the entire amount that will be charged to the customers card
'UMinvoice', # invoice number. must be unique. limited to 10 digits. use orderid if you need longer.
'UMorderid', # invoice number. must be unique. limited to 10 digits. use orderid if you need longer.
'UMponum', # Purchase Order Number
'UMtax', # Tax
'UMnontaxable', # Order is non taxable. True -> 'Y', False -> do not specify this key
'UMtip', # Tip
'UMshipping', # Shipping charge
'UMdiscount', # Discount amount (ie gift certificate or coupon code)
'UMsubtotal', # if subtotal is set, then
'UMcurrency', # Currency of $amount
'UMname', # name of card holder
'UMstreet', # street address
'UMzip', # zip code
'UMdescription', # description of charge
'UMcomments', # Addit
'UMcvv2', # cvv2 result
'UMclerk',
'UMbillzip',
'UMshipstreet',
'UMxid',
'UMip', # Tip
'UMcheckimageback', # Check back, you need to encode it with base64
'UMtestmode', # test transaction but don't process
'UMcheckimagefront', # Check front, you need to encode it with base64
'UMrecurring', # (obsolete, see the addcustomer)
'UMdukpt', # DUK/PT for PIN Debit
'UMbillsourcekey', # True -> 'yes', False -> not specify this field
'UMshipzip',
'UMbillcity',
'UMbillstate',
'UMcardauth',
'UMshipstate',
'UMwebsite',
'UMallowPartialAuth', #set to 'Yes' if a partial authorization (less than the full $amount) will be accepted
'UMbillphone',
'UMepcCode',
'UMdlstate', # drivers license issuing state
'UMterminal', # The type of terminal being used: Optons are POS - cash register, StandAlone - self service terminal, Unattended - ie gas pump, Unkown (Default: Unknown)
'UMschedule', #
'UMbillstreet2',
'UMbillcompany',
'UMignoreDuplicate', # prevent the system from detecti
'UMrestaurant_table', #
'UMshipfname',
'UMmicr',
'UMaccounttype', # Checking or Savings
'UMsoftware', # Allows developers to ident
'UMaccount', # bank account numbertimuryan
'UMbillstreet',
'UMstart', # When to start the
'UMrefNum', # refer
'UMchecknum', # Check Number
'UMcustemail', # customers email add
'UMmagsupport', # Support for mag stripe reader: yes, no, contactless, unknown (default is unknown unless magstripe has been sent)
'UMauxonus',
'UMcontactless', # Support for mag stripe reader: yes, no, contactless, unknown (default is unknown unless magstripe has been sent)
'UMshiplname',
'UMsession',
'UMeci', # manually specify loc
'UMbilllname',
'UMaddcustomer', # (obsolete, see the addcustomer)
'UMsignature', # Signature Capture data
'UMnumleft', # The number of times to
'UMrouting', # bank routing number
'UMisRecurring', # True -> 'Y', False -> do not specify this key
'UMbillamount',
'UMshipcountry',
'UMbillcountry',
'UMbillfname',
'UMcustid', # Alpha-numeric id that uniquely identifies the customer.
'UMdigitalGoods', #digital goods indicator (ecommerce)
'UMtermtype', # The type of terminal being used: Optons are POS - cash register, StandAlone - self service terminal, Unattended - ie gas pump, Unkown (Default: Unknown)
'UMmagstripe', # mag stripe data. can be either Track 1, Track2 or Both (Required if card,exp,name,street and zip aren't filled in)
'UMcardpresent', # Must be set to '1' if processing a card present transaction (Default is false)
'UMshipstreet2',
'UMbilltax',
'UMshipcompany',
'UMcheckformat', # Override default check record format
'UMexpire', # When to stop running transactions. Default is to run forever. If both end and num
'UMfax',
'UMcustreceipt', # send customer a receipt
'UMemail', # customers email add
'UMshipphone',
'UMcustreceiptname', # name
'UMdlnum', # drivers license number (required if not using ssn)
'UMpares', #
'UMcavv',
'UMshipcity',
'UMssn', # social security number
'UMticketedEvent', # ID of Event when
]
    VALID_CUSTOM_KEY_PATTERN = re.compile(r'UMcustom\d+')
_line_nums = '|'.join(str(i) for i in range(1, 21))
    VALID_GOODS_KEYS_PATTERN = re.compile(
        r'UMline(%s)(sku|name|description|cost|qty|taxable|prodRefNum)' % _line_nums)
class GoodsDescriptionError(Exception):
pass
class UnknownCommand(Exception):
pass
class DataError(Exception):
pass
class ResponseError(Exception):
pass
def __init__(self, UMkey = settings.USAEPAY_API_KEY,
UMpin = None,
gatewayurl = None,
usesandbox = settings.USAEPAY_USE_SANDBOX,
ignoresslcerterrors = settings.USAEPAY_IGNORE_SSL_CERT_ERRORS,
cabundle = settings.CABUNDLE,
usaepay_rsa_key = settings.USAEPAY_RSA_KEY,
UMtestmode = settings.USAEPAY_TEST_MODE):
self.gatewayurl = gatewayurl
self.usesandbox = usesandbox
self.UMkey = UMkey
self.UMpin = str(UMpin).strip() if UMpin else None
self.ignoresslcerterrors = ignoresslcerterrors
self.cabundle = cabundle
self.usaepay_rsa_key=usaepay_rsa_key
self.UMtestmode = UMtestmode
def get_gateway_url(self, usesandbox = False):
if self.usesandbox or usesandbox:
return "https://sandbox.usaepay.com/gate"
elif self.gatewayurl:
return self.gatewayurl
else:
return "https://www.usaepay.com/gate"
def check_command(self, command):
if command not in self.VALID_COMMANDS:
raise self.UnknownCommand(command)
def check_data(self, command, data):
""""
Verify that all required data has been set
"""
for key in data:
contains = key in self.VALID_REQUEST_KEYS or self.VALID_CUSTOM_KEY_PATTERN.match(key) or \
self.VALID_GOODS_KEYS_PATTERN.match(key)
if not contains:
raise self.DataError("Unknown key %s" % key)
if command in ('check', 'checkcredit'):
if not 'account' in data:
raise self.DataError("Account Number is required")
if not 'routing' in data:
raise self.DataError("Routing Number is required")
    def prepare_data(self, command, initial_data, goods_lines):
        # Goods descriptions:
        # list of dict(sku='', name='', description='', cost=0.0, qty=0, taxable='', prodRefNum=0)
data = initial_data.copy()
data['UMkey'] = self.UMkey
if 'UMcheckimagefront' in data or 'UMcheckimageback' in data:
data['UMcheckimageencoding'] = 'base64'
if self.UMtestmode:
data.setdefault('UMtestmode', self.UMtestmode)
for index, line in enumerate(goods_lines, 1):
for key in line:
data['UMline%s%s' % (index, key)] = line[key]
# Create hash if pin has been set.
if self.UMpin:
# generate random seed value
seed = trunc(time.time(), 4) + trunc(random.random(), 10)[2:]
# assemble prehash data
prehash = ':'.join( (command, self.UMpin, str(data['UMamount']), str(data['UMinvoice']), seed) )
            # sha1 hash
            data['UMhash'] = 's/' + seed + '/' + hashlib.sha1(prehash).hexdigest()
return data
def process(self, command, initial_data, goods_lines = [], usesandbox = False):
"""
Send transaction correspond to 'command' name to the USAePay Gateway and parse response
goods_lines - list with good item descriptions
"""
        data = self.prepare_data(command, initial_data, goods_lines)
gatewayurl = self.get_gateway_url(usesandbox)
self.check_command(command)
self.check_data(command, initial_data)
# Post data to Gateway
response=self.http_post(gatewayurl, data)
if response.status != 200:
            raise self.ResponseError('Wrong response status %s' % response.status)
# decode response content
result = urlparse.parse_qs(response.content)
logger.debug('Decoded response: %s', result)
# Check response errors
if not 'UMversion' in result:
raise self.ResponseError('UMversion is missed in the response')
if not 'UMstatus' in result:
raise self.ResponseError('UMstatus is missed in the response')
error_description = tuple(result.get(key, None) for key in 'UMerrorcode UMerror UMcustnum'.split())
if any(error_description):
raise self.ResponseError('Errors : %s %s %s' % error_description)
if not result['UMresult'] == ['A']:
raise self.ResponseError('Wrong response status %s' % result['UMresult'])
return result
def http_post(self, url, data):
'Post request to payment server via stream'
data = urllib.urlencode(data)
logger.debug('stream post request: %s', data)
headers = {
"Content-type": "application/x-www-form-urlencoded",
"Content-length": len(data)
}
conn = httplib.HTTPSConnection("sandbox.usaepay.com", 443, self.usaepay_rsa_key, self.cabundle, False, 45)
conn.request("POST", "/gate.php", data, headers)
response = conn.getresponse()
response.content = response.read()
logger.debug('stream post response: status=%s %s', response.status, response.content)
return response
In settings:
SHOP_HANDLER_PAYMENT = "payusaepay.views.process"
process is not a view function; it is a method of the Transaction class, so you need an instance of Transaction in order to call the process method.
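One way to satisfy the import path in settings is a module-level wrapper in payusaepay/views.py that instantiates Transaction and delegates to it; a minimal sketch (argument names follow Transaction.process above):
# appended at the bottom of payusaepay/views.py

def process(command, initial_data, goods_lines=None, usesandbox=False):
    """Module-level entry point so settings can import
    'payusaepay.views.process' as a plain function."""
    txn = Transaction()
    return txn.process(command, initial_data, goods_lines or [], usesandbox)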