I am trying to create a website for myself. It is kind of a YouTube for a local network. I am using video.js (I have also tried Plyr.io) to play the video, but I cannot fast forward or rewind. I can only play the video from beginning to end. If I try to skip forward, it just resets to the start. What am I doing wrong?
Thanks in advance!
That behaviour sounds like the server doesn't implement Range headers. When the player tries to seek, the server returns the start of the file instead of the requested byte range. If you try Safari you'll probably find it won't play at all. Check questions like Byte Ranges in Django.
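As a quick sanity check, you can test whether the server honours Range requests at all; a 206 status with a Content-Range header means byte ranges work (the URL below is just a placeholder for one of your own video files):

import requests

r = requests.get(
    "http://192.168.1.10:8000/media/video.mp4",  # placeholder URL
    headers={"Range": "bytes=0-99"},
    stream=True,
)
print(r.status_code)                    # 206 means partial content is supported
print(r.headers.get("Content-Range"))   # e.g. "bytes 0-99/12345678"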
Yes, I faced a similar issue with the video.js library. With the help of Byte Ranges in Django I solved it by adding the RangesMiddleware below, and now I can skip forward and backward in the video.
import os

from django.utils.deprecation import MiddlewareMixin


class RangesMiddleware(MiddlewareMixin):
    def process_response(self, request, response):
        if response.status_code != 200 or not hasattr(response, "file_to_stream"):
            return response
        http_range = request.META.get("HTTP_RANGE")
        if not (
            http_range
            and http_range.startswith("bytes=")
            and http_range.count("-") == 1
        ):
            return response
        if_range = request.META.get("HTTP_IF_RANGE")
        if (
            if_range
            and if_range != response.get("Last-Modified")
            and if_range != response.get("ETag")
        ):
            return response
        f = response.file_to_stream
        statobj = os.fstat(f.fileno())
        start, end = http_range.split("=")[1].split("-")
        if not start:  # requesting the last N bytes
            start = max(0, statobj.st_size - int(end))
            end = ""
        start, end = int(start or 0), int(end or statobj.st_size - 1)
        assert 0 <= start < statobj.st_size, (start, statobj.st_size)
        end = min(end, statobj.st_size - 1)
        f.seek(start)
        old_read = f.read
        f.read = lambda n: old_read(min(n, end + 1 - f.tell()))
        response.status_code = 206
        response["Content-Length"] = end + 1 - start
        response["Content-Range"] = "bytes %d-%d/%d" % (start, end, statobj.st_size)
        return response
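For the middleware to take effect it also has to be registered in settings.py; a minimal sketch (the dotted path yourproject.middleware is an assumption for wherever you keep the class):

# settings.py -- add RangesMiddleware to the existing MIDDLEWARE list
MIDDLEWARE = [
    # ... the middleware entries you already have ...
    "yourproject.middleware.RangesMiddleware",
]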
I am new to coding and I have a question. I tried to run Selenium to scrape data from 13 pages in total. Unfortunately, after page 13 the loop keeps running, and I don't know how to stop it. The error is: list index out of range. Please, I need your help. Thank you!
This is the code that I wrote:
import time

import pandas as pd
from selenium.webdriver.common.by import By

# driver is assumed to be an already-created Selenium WebDriver instance

txt = driver.find_element(By.XPATH, '//*[@id="searchResultsCount"]').text
print(txt)
print(txt.split(' '))

def result_status(driver):
    txt = driver.find_element(By.XPATH, '//*[@id="searchResultsCount"]').text
    current = txt.split(' ')[1]
    end = txt.split(' ')[3]
    return current, end, driver

def next_page(driver):
    pg_elems = driver.find_elements(By.CLASS_NAME, 'page-link')  # <a href="#page-274"
    nxt_elem = [x for x in pg_elems if x.text == 'Next'][0]
    nxt_elem.click()
    time.sleep(2)
    return driver

driver = next_page(driver)
results_df = pd.DataFrame()

# Put it all together (from Matt)
# Get current results
current, end, driver = result_status(driver)

# Loop through results
i = 0
while current != end:
    i += 1
    if i % 2 == 0:
        results = driver.find_element(By.ID, 'searchResultsArea')
        results_html = results.get_attribute('innerHTML')
        temp = pd.read_html(results_html)[0]
        results_df = pd.concat([results_df, temp], ignore_index=True)
        results_df.to_csv("results.csv", index=False)
    # Check status
    current, end, driver = result_status(driver)
    print(current, '|', end='')
    # Go to next page
    driver = next_page(driver)
    if i == 660:
        break

results = driver.find_element(By.ID, 'overSearchResults')
results_html = results.get_attribute('innerHTML')
df = pd.read_html(results_html)[0]
And this is the error:
IndexError                                Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_37444/2741504647.py in <module>
     21
     22     # Go to next page
---> 23     driver = next_page(driver)
     24     if i == 660:
     25         break
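The list index out of range comes from next_page: on the last page there is no element whose text is 'Next', so the list comprehension is empty and [0] fails. A minimal sketch of a guarded version (the names and return values here are just one way to do it, not tested against your site):

def next_page(driver):
    pg_elems = driver.find_elements(By.CLASS_NAME, 'page-link')
    nxt_elems = [x for x in pg_elems if x.text == 'Next']
    if not nxt_elems:          # no 'Next' link -> we are on the last page
        return driver, False
    nxt_elems[0].click()
    time.sleep(2)
    return driver, True

# In the main loop, stop when next_page reports there is no next page:
# driver, has_next = next_page(driver)
# if not has_next:
#     break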
I would like to upload files between 100-200 MB using Flask on PythonAnywhere (which has a 100 MB upload limit). I am trying to implement chunking but am still getting a 413 Request Entity Too Large error and am unsure why. I assume it has to do with using open(large_file) but am unsure how to get around this. Any help would be appreciated.
All the files in /home/zweiss124/mysite/files that end in .fastq are 100-200 MB.
import glob
import os

def read_in_chunks(file_object, chunk_size=10000):
    while True:
        data = file_object.read(chunk_size)
        if not data:
            break
        yield data

def get_uploaded_files():
    global uploaded_files
    uploaded_files = []
    global num_chunks
    num_chunks = []
    os.chdir("/home/zweiss124/mysite/files")
    #for file in shared_dict["filenames"]:
    for file in glob.glob("*.fastq"):
        f = open(file)
        index = 0
        offset = 0
        headers = {}
        all_chunks = ''
        ind_num_chunks = 0
        for chunk in read_in_chunks(f):
            offset = index + len(chunk)
            headers['Content-Type'] = 'application/octet-stream'
            index = offset
            all_chunks += chunk
            ind_num_chunks += 1
        num_chunks.append(ind_num_chunks)
        uploaded_files.append(all_chunks.split("\n")[:4 * shared_dict["num_to_upload"]])
    return uploaded_files, num_chunks
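For what it's worth, the 413 is enforced per request, so chunking only helps if each chunk is sent in its own request that stays under the 100 MB limit; concatenating everything into all_chunks and sending it at once defeats the purpose. A rough sketch of a chunk-per-request upload with requests (the /upload_chunk endpoint, its parameters, and the 50 MB chunk size are all assumptions):

import os
import requests

UPLOAD_URL = "https://zweiss124.pythonanywhere.com/upload_chunk"  # hypothetical endpoint

def upload_in_chunks(path, chunk_size=50 * 1024 * 1024):  # 50 MB per request, under the limit
    filename = os.path.basename(path)
    with open(path, "rb") as f:
        for chunk_index, chunk in enumerate(iter(lambda: f.read(chunk_size), b"")):
            resp = requests.post(
                UPLOAD_URL,
                data=chunk,
                headers={"Content-Type": "application/octet-stream"},
                params={"filename": filename, "chunk": chunk_index},
            )
            resp.raise_for_status()

# On the Flask side, the matching view would append each chunk to the target file,
# e.g. open(filename, "ab").write(request.get_data()).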
I have python code that acquires serial data from 2 devices and writes to a .txt file. Every 4-15 minutes there is approx 30-45 seconds of data missing in the .txt file and this is not acceptable for our use case. I've spent hours googling and searching SO about multiprocessing and serial port data acquisition and haven't come up with a solution.
Here is my code
gpsser = input("Enter GPS comport as 'COM_': ")
ser = serial.Serial(port=gpsser,
                    baudrate=38400,
                    timeout=2,
                    parity=serial.PARITY_NONE,
                    stopbits=serial.STOPBITS_ONE,
                    bytesize=serial.EIGHTBITS)
# dser, the serial port for the second (DualEM) device, is assumed to be opened elsewhere in the same way as ser

root = Tk()
root.title("DualEM DAQ")
path = filedialog.asksaveasfilename() + ".txt"
file = glob.glob(path)
filename = path

with open(filename, 'wb') as f:
    w = csv.writer(f, dialect='excel')
    w.writerow(['header'])
def sensor():
    while True:
        try:
            NMEA1 = dser.readline().decode("ascii")
            while dser.inWaiting() == 0:
                pass
            NMEA1_array = NMEA1.split(',')
            NMEA2_array = NMEA2.split(',')
            NMEA3_array = NMEA3.split(',')
            NMEA4_array = NMEA4.split(',')
            if NMEA1_array[0] == '$PDLGH':
                value1 = NMEA1_array[2]
                value2 = NMEA1_array[4]
            if NMEA1_array[0] == '$PDLG1':
                value3 = NMEA1_array[2]
                value4 = NMEA1_array[4]
            if NMEA1_array[0] == '$PDLG2':
                value5 = NMEA1_array[2]
                value6 = NMEA1_array[4]
            return (float(value1), float(value2), float(value3),
                    float(value4), float(value5), float(value6))
        except (IndexError, NameError, ValueError, UnicodeDecodeError):
            pass
def gps():
    while True:
        try:
            global Status, Latitude, Longitude, Speed, Truecourse, Date
            global GPSQuality, Satellites, HDOP, Elevation, Time
            while ser.inWaiting() == 0:
                pass
            msg = ser.readline()
            pNMEA = pynmea2.parse(msg)
            if isinstance(pNMEA, pynmea2.types.talker.RMC):
                Latitude = pynmea2.dm_to_sd(pNMEA.lat)
                Longitude = -(pynmea2.dm_to_sd(pNMEA.lon))
                Date = pNMEA.datestamp
                Time = datetime.datetime.now().time()
            if () is not None:
                return (Longitude, Latitude, Date, Time)
        except (ValueError, UnboundLocalError, NameError):
            pass
while True:
    try:
        with open(filename, "ab") as f:
            data = [(gps() + sensor())]
            writer = csv.writer(f, delimiter=",", dialect='excel')
            writer.writerows(data)
            f.flush()
            print(data)
    except (AttributeError, TypeError) as e:
        pass
The program is writing to the file, but I need help understanding why I'm losing 30-45 seconds of data every so often. Where is the bottleneck that is causing this to happen?
Here is an example of where the breaks are; note the breaks are approx. 50 seconds in this case.
[Screenshot: breaks in writing data to the CSV]
Back when I used PySerial, I did this:
nbytes = ser.inWaiting()
if nbytes > 0:
    indata = ser.read(nbytes)
    # now parse the bytes in indata to look for the delimiter, \n in your case,
    # and if found process the input line(s) until the delimiter is not found
else:
    # no input yet, do other processing or allow other things to run
    time.sleep(0.05)
Also note that newer versions (3.0+) of PySerial expose .in_waiting as a property rather than a method, so no parentheses; it used to be .inWaiting().
You should not flush the serial port input. Data is arriving on its own timing into a buffer in the driver, not when your read happens, so you are throwing away data with your flush. You may need to add code to synchronize with the input stream.
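To make that concrete, here is a minimal sketch of reading whatever bytes have arrived and assembling complete lines from them, without ever flushing the input buffer (handle_line is a hypothetical stand-in for your parsing/writing, and the 0.05 s sleep is an arbitrary choice):

import time

buffer = b""
while True:
    nbytes = ser.in_waiting          # use ser.inWaiting() on PySerial < 3.0
    if nbytes > 0:
        buffer += ser.read(nbytes)
        while b"\n" in buffer:       # process every complete line that has arrived
            line, buffer = buffer.split(b"\n", 1)
            handle_line(line)        # hypothetical function: parse/write the NMEA sentence
    else:
        time.sleep(0.05)             # give the OS and other threads some time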
I used threading with a queue and changed my main loop to look like this:
while True:
    try:
        with open(filename, "ab") as f:
            writer = csv.writer(f, delimiter=",", dialect='excel')
            data = []
            data.extend(gpsdata())
            data.extend(dualemdata())
            writer.writerows([data])
            f.flush()
            f.close()
        dser.flushInput()
        ser.flushInput()
        print(data)
        sleep(0.05)
    except (AttributeError, TypeError) as e:
        pass
I had to flush the serial port input data before looping back to the read functions so it was reading new real-time data (this eliminated any lag in the incoming data stream). I've run a 30-minute test and the time gaps appear to have gone away. Thank you to Cmaster for giving me some diagnostic ideas.
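The threading-plus-queue part isn't shown above; a minimal sketch of that pattern would be one reader thread per port pushing raw lines into a queue that the main loop drains (the names gps_reader and gps_queue are just illustrative):

import queue
import threading

gps_queue = queue.Queue()

def gps_reader(port, out_queue):
    # Runs in its own thread: blocks on the serial port and never loses data
    # while the main loop is busy writing the CSV.
    while True:
        line = port.readline()
        if line:
            out_queue.put(line)

threading.Thread(target=gps_reader, args=(ser, gps_queue), daemon=True).start()

# In the main loop, drain whatever has accumulated instead of reading the port directly:
# while not gps_queue.empty():
#     process(gps_queue.get())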
Edit: I extended the question to HTTPS. The "S" part is not yet solved and I don't really need it, but it might be interesting for others.
I want to do the equivalent of
hdr='GET / HTTP/1.1\r\nContent-Length: 10000000000\r\n\r\n'
(echo -en "$hdr"; dd if=/dev/zero bs=1000000 count=999; read tmp) | nc $SOME_IP 80
with Python 2.7. If possible, I'd like to use only the standard library plus the requests and socket modules.
FYI, the above script sends a large HTTP request (~1 GB of zeros) to $SOME_IP without being too heavy on the sender's RAM.
Something like this?
import socket

def get_content_length():
    return 10000000000

def get_chunk_size():
    return 1000000

def yield_content():
    total_size = get_content_length()
    chunk_size = get_chunk_size()
    current_size = 0
    end = total_size - chunk_size
    chunk = '\x00' * chunk_size
    while current_size < end:
        yield chunk
        current_size += chunk_size
    yield chunk[:total_size - current_size]

s = socket.socket()
s.connect((host, port))
hdr = 'GET / HTTP/1.1\r\nContent-Length: %s\r\n\r\n' % get_content_length()
s.sendall(hdr)
for chunk in yield_content():
    s.sendall(chunk)
# should I wait for the response?
Here is what I cooked up and seems to work:
import sys, socket

def flood(hostname, port, count=1):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((hostname, port))
    s.sendall('GET / HTTP/1.1\r\nContent-Length: 10000000000\r\n\r\n')
    with open('/dev/zero', 'rb') as data:
        for _ in xrange(count):
            s.sendall(data.read(1000000))
    s.shutdown(socket.SHUT_WR)
    while True:
        data = s.recv(1024)
        if data == "":
            break
        print("Received:", repr(data))
    print("Connection closed.")
    s.close()
The answer by freakish is of course more cross-platform, as it does not require /dev/zero.
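As for the still-unsolved HTTPS part from the edit, one approach on Python 2.7 would be to wrap the socket with the ssl module before connecting and keep the streaming logic unchanged; a rough, untested sketch (no certificate validation is configured here):

import socket
import ssl

s = ssl.wrap_socket(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
s.connect((hostname, 443))  # same hostname as in flood(), but port 443
s.sendall('GET / HTTP/1.1\r\nContent-Length: 10000000000\r\n\r\n')
# ...then stream the zero-filled chunks with s.sendall() exactly as before...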
I am unsure why I am getting this error. I'm reading from a file called columns_unsorted.txt, then trying to write to columns_sorted.txt. The error is on fan_on = int(string_j[1]), saying list index out of range. Here's my code:
#!/usr/bin/python
import fileinput
import collections

# open document to read from
j = open('./columns_unsorted.txt', 'r')
# note this is a file of rows of space-delimited data in the format <1384055277275353 0 0 0 1 0 0 0 0 22:47:57> on each row, the first term being unix time, the last human time, the middle binary fields indicating which machine event happened

# open document to record results into
l = open('./columns_sorted.txt', 'w')

# CREATE ARRAY CALLED EVENTS
events = collections.deque()

i = 1

# FILL ARRAY WITH "FACTS" ROWS; SPLIT INTO FIELDS, CHANGE TYPES AS APPROPRIATE
for line in j:  # columns_unsorted
    line = line.rstrip('\n')
    string_j = line.split(' ')
    time = str(string_j[0])
    fan_on = int(string_j[1])
    fan_off = int(string_j[2])
    heater_on = int(string_j[3])
    heater_off = int(string_j[4])
    space_on = int(string_j[5])
    space_off = int(string_j[6])
    pump_on = int(string_j[7])
    pump_off = int(string_j[8])
    event_time = str(string_j[9])
    row = time, fan_on, fan_off, heater_on, heater_off, space_on, space_off, pump_on, pump_off, event_time
    events.append(row)
You are missing the readlines function, no?
You have to do:
j = open('./columns_unsorted.txt', 'r')
l = j.readlines()
for line in l:
    # what you want to do with each line
In the future, you should print some of your variables, just to be sure the code is working as you want it to, and to help you identify problems.
(For example, if you printed string_j in your code, you would see what kind of problem you have.)
The problem was an inconsistent line in the data file. Forgive my haste in posting.
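For reference, a small guard that skips rows which don't have the expected 10 space-separated fields would have avoided the IndexError; a minimal sketch of the loop from the question with that check added:

for line in j:
    string_j = line.rstrip('\n').split(' ')
    if len(string_j) != 10:  # inconsistent row: wrong number of fields
        print("Skipping bad line:", repr(line))
        continue
    # ... convert the fields and append the row exactly as before ...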