When I use the Django admin to delete some users, it returns this error:
ERROR base handle_uncaught_exception Internal Server Error: /admin/auth/user/
Traceback (most recent call last):
File "/home/yuanyin/.virtualenvs/transcats/lib/python3.5/site-packages/django/core/handlers/base.py", line 235, in get_response
response = middleware_method(request, response)
File "/home/yuanyin/.virtualenvs/transcats/lib/python3.5/site-packages/htmlmin/middleware.py", line 44, in process_response
parser=parser)
File "/home/yuanyin/.virtualenvs/transcats/lib/python3.5/site-packages/htmlmin/minify.py", line 46, in html_minify
mini_soup = space_minify(soup, ignore_comments)
File "/home/yuanyin/.virtualenvs/transcats/lib/python3.5/site-packages/htmlmin/minify.py", line 69, in space_minify
space_minify(child, ignore_comments)
File "/home/yuanyin/.virtualenvs/transcats/lib/python3.5/site-packages/htmlmin/minify.py", line 69, in space_minify
space_minify(child, ignore_comments)
File "/home/yuanyin/.virtualenvs/transcats/lib/python3.5/site-packages/htmlmin/minify.py", line 69, in space_minify
space_minify(child, ignore_comments)
File "/home/yuanyin/.virtualenvs/transcats/lib/python3.5/site-packages/htmlmin/minify.py", line 69, in space_minify
space_minify(child, ignore_comments)
File "/home/yuanyin/.virtualenvs/transcats/lib/python3.5/site-packages/htmlmin/minify.py", line 69, in space_minify
space_minify(child, ignore_comments)
File "/home/yuanyin/.virtualenvs/transcats/lib/python3.5/site-packages/htmlmin/minify.py", line 69, in space_minify
space_minify(child, ignore_comments)
File "/home/yuanyin/.virtualenvs/transcats/lib/python3.5/site-packages/htmlmin/minify.py", line 69, in space_minify
space_minify(child, ignore_comments)
File "/home/yuanyin/.virtualenvs/transcats/lib/python3.5/site-packages/htmlmin/minify.py", line 69, in space_minify
space_minify(child, ignore_comments)
File "/home/yuanyin/.virtualenvs/transcats/lib/python3.5/site-packages/htmlmin/minify.py", line 101, in space_minify
soup.string.replace_with(new_string)
File "/home/yuanyin/.virtualenvs/transcats/lib/python3.5/site-packages/bs4/element.py", line 231, in replace_with
self.extract()
File "/home/yuanyin/.virtualenvs/transcats/lib/python3.5/site-packages/bs4/element.py", line 258, in extract
del self.parent.contents[self.parent.index(self)]
File "/home/yuanyin/.virtualenvs/transcats/lib/python3.5/site-packages/bs4/element.py", line 938, in index
for i, child in enumerate(self.contents):
File "/home/yuanyin/.virtualenvs/transcats/lib/python3.5/site-packages/gunicorn/workers/base.py", line 192, in handle_abort
sys.exit(1)
SystemExit: 1
The strange part is that it doesn't happen every time: deleting some users works fine, while deleting others triggers the error. What's going on?
gunicorn is probably aborting a request that takes too long; the SystemExit: 1 at the bottom of the traceback comes from its handle_abort handler. Try setting --timeout to a higher value.
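For example (a sketch; the 120-second value is arbitrary, pick whatever fits your slowest admin action), you can raise the limit on the command line with --timeout 120 or in a gunicorn config file:

# gunicorn.conf.py -- equivalent to passing --timeout 120 on the command line
timeout = 120  # seconds a worker may spend on a single request before gunicorn aborts it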
I get this strange error on Python 2.7; it works fine with Python 3:
Traceback (most recent call last):
File "/home/guettli/descript/projects/descript_jugendhaus/.tox/py27-django14/lib/python2.7/site-packages/pip/_vendor/pep517/_in_process.py", line 280, in <module>
main()
File "/home/guettli/descript/projects/descript_jugendhaus/.tox/py27-django14/lib/python2.7/site-packages/pip/_vendor/pep517/_in_process.py", line 263, in main
json_out['return_val'] = hook(**hook_input['kwargs'])
File "/home/guettli/descript/projects/descript_jugendhaus/.tox/py27-django14/lib/python2.7/site-packages/pip/_vendor/pep517/_in_process.py", line 133, in prepare_metadata_for_build_wheel
return hook(metadata_directory, config_settings)
File "/tmp/pip-build-env-sy2MSY/overlay/lib/python2.7/site-packages/setuptools/build_meta.py", line 156, in prepare_metadata_for_build_wheel
self.run_setup()
File "/tmp/pip-build-env-sy2MSY/overlay/lib/python2.7/site-packages/setuptools/build_meta.py", line 243, in run_setup
self).run_setup(setup_script=setup_script)
File "/tmp/pip-build-env-sy2MSY/overlay/lib/python2.7/site-packages/setuptools/build_meta.py", line 142, in run_setup
exec(compile(code, __file__, 'exec'), locals())
File "setup.py", line 195, in <module>
distclass=BinaryDistribution,
File "/tmp/pip-build-env-sy2MSY/overlay/lib/python2.7/site-packages/setuptools/__init__.py", line 162, in setup
return distutils.core.setup(**attrs)
File "/usr/lib/python2.7/distutils/core.py", line 151, in setup
dist.run_commands()
File "/usr/lib/python2.7/distutils/dist.py", line 953, in run_commands
self.run_command(cmd)
File "/usr/lib/python2.7/distutils/dist.py", line 972, in run_command
cmd_obj.run()
File "/tmp/pip-build-env-sy2MSY/overlay/lib/python2.7/site-packages/setuptools/command/dist_info.py", line 31, in run
egg_info.run()
File "/tmp/pip-build-env-sy2MSY/overlay/lib/python2.7/site-packages/setuptools/command/egg_info.py", line 296, in run
self.find_sources()
File "/tmp/pip-build-env-sy2MSY/overlay/lib/python2.7/site-packages/setuptools/command/egg_info.py", line 303, in find_sources
mm.run()
File "/tmp/pip-build-env-sy2MSY/overlay/lib/python2.7/site-packages/setuptools/command/egg_info.py", line 534, in run
self.add_defaults()
File "/tmp/pip-build-env-sy2MSY/overlay/lib/python2.7/site-packages/setuptools/command/egg_info.py", line 570, in add_defaults
sdist.add_defaults(self)
File "/tmp/pip-build-env-sy2MSY/overlay/lib/python2.7/site-packages/setuptools/command/py36compat.py", line 36, in add_defaults
self._add_defaults_ext()
File "/tmp/pip-build-env-sy2MSY/overlay/lib/python2.7/site-packages/setuptools/command/py36compat.py", line 118, in _add_defaults_ext
if self.distribution.has_ext_modules():
File "setup.py", line 109, in has_ext_modules
return super().has_ext_modules() or 'SETUPPY_ALLOW_PURE' not in os.environ
TypeError: super() takes at least 1 argument (0 given)
What could be the root cause?
I found a solution: if I disable the python-hunter library, it works.
I guess python-hunter is no longer compatible with Python 2.7.
That's fine for my use case; I can simply remove the library (although it is a great tracing library).
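For context, the failing line in the traceback uses the bare super() call, which is Python-3-only syntax. A sketch of the Python 2/3-compatible form (reusing the BinaryDistribution name from the traceback, otherwise hypothetical) would look like this:

import os
from setuptools.dist import Distribution

class BinaryDistribution(Distribution):
    def has_ext_modules(self):
        # Python 3 only -- this is the form the traceback shows failing on 2.7:
        #     return super().has_ext_modules() or 'SETUPPY_ALLOW_PURE' not in os.environ
        # Python 2/3 compatible: pass the class and instance to super() explicitly.
        return (super(BinaryDistribution, self).has_ext_modules()
                or 'SETUPPY_ALLOW_PURE' not in os.environ)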
I don't know if this is relevant, but the supercomputer operators upgraded the Booster module yesterday. Since then, my TensorFlow scripts, which were working perfectly fine before, raise the following error:
2018-06-30 02:21:11.787262: I tensorflow/core/platform/cpu_feature_guard.cc:140] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
Traceback (most recent call last):
File "/homeb/eusmi01/eusmi0100/Programs/consscortk/bin/deep-ScaffOpt.py", line 524, in <module>
serial_RF=False))
File "/homeb/eusmi01/eusmi0100/Programs/consscortk/bin/deep-ScaffOpt.py", line 189, in train_MLP
MLP = deepMetaPredictor().combinePredictors_datatypes(datasets, mat, serial_RF=serial_RF, META_ZCUTOFF=datasets.args.META_ZCUTOFF)
File "/homeb/eusmi01/eusmi0100/Programs/consscortk/lib/deepMetaPredictor.py", line 169, in combinePredictors_datatypes
mlp.fit(datasets.x_crossval['lhl'], datasets.y_crossval['lhl'])
File "/homeb/eusmi01/eusmi0100/Programs/consscortk/lib/ANN_functions.py", line 324, in fit
_, c, p = self.sess.run([self.optimizer, self.cost, self.pred], feed_dict={self.x: batch_x, self.y: batch_y})
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 900, in run
run_metadata_ptr)
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1135, in _run
feed_dict_tensor, options, run_metadata)
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1316, in _do_run
run_metadata)
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1335, in _do_call
raise type(e)(node_def, op, message)
InvalidArgumentError: Expected size[0] in [0, 150], but got 300
[[Node: Slice = Slice[Index=DT_INT32, T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"](_arg_Placeholder_1_0_1, Slice/begin, gradients/sub_grad/Shape_1)]]
Caused by op u'Slice', defined at:
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/scoop/_control.py", line 127, in runFuture
future.resultValue = future.callable(*future.args, **future.kargs)
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/runpy.py", line 252, in run_path
return _run_module_code(code, init_globals, run_name, path_name)
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/runpy.py", line 82, in _run_module_code
mod_name, mod_fname, mod_loader, pkg_name)
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/homeb/eusmi01/eusmi0100/Programs/consscortk/bin/deep-ScaffOpt.py", line 524, in <module>
serial_RF=False))
File "/homeb/eusmi01/eusmi0100/Programs/consscortk/bin/deep-ScaffOpt.py", line 189, in train_MLP
MLP = deepMetaPredictor().combinePredictors_datatypes(datasets, mat, serial_RF=serial_RF, META_ZCUTOFF=datasets.args.META_ZCUTOFF)
File "/homeb/eusmi01/eusmi0100/Programs/consscortk/lib/deepMetaPredictor.py", line 167, in combinePredictors_datatypes
random_state=datasets.random_state)
File "/homeb/eusmi01/eusmi0100/Programs/consscortk/lib/ANN_functions.py", line 235, in __init__
self.cost = tf_group_RMSE(self.y, self.pred, matrices.assaysize_vec, matrices.group_matrix) \
File "/homeb/eusmi01/eusmi0100/Programs/consscortk/lib/ConsScoreTK_Statistics.py", line 1261, in tf_group_RMSE
Y = tf.slice(Y, [0], [b_molnum])
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/tensorflow/python/ops/array_ops.py", line 650, in slice
return gen_array_ops._slice(input_, begin, size, name=name)
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/tensorflow/python/ops/gen_array_ops.py", line 7093, in _slice
"Slice", input=input, begin=begin, size=size, name=name)
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3392, in create_op
op_def=op_def)
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1718, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
InvalidArgumentError (see above for traceback): Expected size[0] in [0, 150], but got 300
[[Node: Slice = Slice[Index=DT_INT32, T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"](_arg_Placeholder_1_0_1, Slice/begin, gradients/sub_grad/Shape_1)]]
Traceback (most recent call last):
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/runpy.py", line 174, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/scoop/bootstrap/__main__.py", line 302, in <module>
b.main()
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/scoop/bootstrap/__main__.py", line 92, in main
self.run()
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/scoop/bootstrap/__main__.py", line 290, in run
futures_startup()
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/scoop/bootstrap/__main__.py", line 271, in futures_startup
run_name="__main__"
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/scoop/futures.py", line 64, in _startup
result = _controller.switch(rootFuture, *args, **kargs)
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/scoop/_control.py", line 253, in runController
raise future.exceptionValue
tensorflow.python.framework.errors_impl.InvalidArgumentError: Expected size[0] in [0, 150], but got 300
[[Node: Slice = Slice[Index=DT_INT32, T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"](_arg_Placeholder_1_0_1, Slice/begin, gradients/sub_grad/Shape_1)]]
Caused by op u'Slice', defined at:
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/scoop/_control.py", line 127, in runFuture
future.resultValue = future.callable(*future.args, **future.kargs)
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/runpy.py", line 252, in run_path
return _run_module_code(code, init_globals, run_name, path_name)
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/runpy.py", line 82, in _run_module_code
mod_name, mod_fname, mod_loader, pkg_name)
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/homeb/eusmi01/eusmi0100/Programs/consscortk/bin/deep-ScaffOpt.py", line 524, in <module>
serial_RF=False))
File "/homeb/eusmi01/eusmi0100/Programs/consscortk/bin/deep-ScaffOpt.py", line 189, in train_MLP
MLP = deepMetaPredictor().combinePredictors_datatypes(datasets, mat, serial_RF=serial_RF, META_ZCUTOFF=datasets.args.META_ZCUTOFF)
File "/homeb/eusmi01/eusmi0100/Programs/consscortk/lib/deepMetaPredictor.py", line 167, in combinePredictors_datatypes
random_state=datasets.random_state)
File "/homeb/eusmi01/eusmi0100/Programs/consscortk/lib/ANN_functions.py", line 235, in __init__
self.cost = tf_group_RMSE(self.y, self.pred, matrices.assaysize_vec, matrices.group_matrix) \
File "/homeb/eusmi01/eusmi0100/Programs/consscortk/lib/ConsScoreTK_Statistics.py", line 1261, in tf_group_RMSE
Y = tf.slice(Y, [0], [b_molnum])
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/tensorflow/python/ops/array_ops.py", line 650, in slice
return gen_array_ops._slice(input_, begin, size, name=name)
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/tensorflow/python/ops/gen_array_ops.py", line 7093, in _slice
"Slice", input=input, begin=begin, size=size, name=name)
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3392, in create_op
op_def=op_def)
File "/homeb/eusmi01/eusmi0100/Programs/Miniconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1718, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
InvalidArgumentError (see above for traceback): Expected size[0] in [0, 150], but got 300
[[Node: Slice = Slice[Index=DT_INT32, T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"](_arg_Placeholder_1_0_1, Slice/begin, gradients/sub_grad/Shape_1)]]
The version of TensorFlow I am using is 1.8.0. For the record, the same code works perfectly fine on my laptop, where I have the 1.4.0-dev version installed. Could anyone enlighten me about the source of this error?
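For reference, the message itself only means that tf.slice asked for more elements than the tensor that was fed contains; a minimal sketch with hypothetical shapes (TF 1.x graph API, as in the traceback) reproduces a similar error:

import numpy as np
import tensorflow as tf  # TF 1.x graph-mode API

y = tf.placeholder(tf.float32, shape=[None])  # will be fed only 150 values
sliced = tf.slice(y, [0], [300])              # asks for 300 elements starting at index 0

with tf.Session() as sess:
    # Raises InvalidArgumentError: Expected size[0] in [0, 150], but got 300
    sess.run(sliced, feed_dict={y: np.zeros(150, dtype=np.float32)})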
I am using the flower tutorial code in cloudml-samples to implement multi-label classification on a set of restaurant photos.
I have updated dict.txt and the input files accordingly; here are some sample lines.
dict.txt
good_for_lunch
good_for_dinner
takes_reservations
outdoor_seating
restaurant_is_expensive
has_alcohol
has_table_service
ambience_is_classy
good_for_kids
eval_set.csv
...
gs://yelp_restaurant_photo_classification/train_photos/312753.jpg,good_for_dinner,takes_reservations,has_alcohol,has_table_service,good_for_kids
gs://yelp_restaurant_photo_classification/train_photos/342651.jpg,good_for_lunch,good_for_dinner,outdoor_seating,good_for_kids
gs://yelp_restaurant_photo_classification/train_photos/217079.jpg,takes_reservations,has_table_service
...
The preprocess job started running fine, but then this specific error kept coming up until the job failed.
python trainer/preprocess.py \
--input_dict "$DICT_FILE" \
--input_path "gs://yelp_restaurant_photo_classification/labels/eval_set.csv" \
--output_path "${GCS_PATH}/preproc/eval" \
--cloud
Job Logs - KeyError: u"FALSE [while running 'Extract label ids']"
(d8285fa55cb6ab07): Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/dataflow_worker/batchworker.py", line 514, in do_work
work_executor.execute()
File "dataflow_worker/executor.py", line 894, in dataflow_worker.executor.MapTaskExecutor.execute (dataflow_worker/executor.c:24204)
op.start()
File "dataflow_worker/executor.py", line 197, in dataflow_worker.executor.ReadOperation.start (dataflow_worker/executor.c:7039)
def start(self):
File "dataflow_worker/executor.py", line 202, in dataflow_worker.executor.ReadOperation.start (dataflow_worker/executor.c:6946)
with self.spec.source.reader() as reader:
File "dataflow_worker/executor.py", line 212, in dataflow_worker.executor.ReadOperation.start (dataflow_worker/executor.c:6891)
self.output(windowed_value)
File "dataflow_worker/executor.py", line 142, in dataflow_worker.executor.Operation.output (dataflow_worker/executor.c:5249)
cython.cast(Receiver, self.receivers[output_index]).receive(windowed_value)
File "dataflow_worker/executor.py", line 89, in dataflow_worker.executor.ConsumerSet.receive (dataflow_worker/executor.c:3487)
cython.cast(Operation, consumer).process(windowed_value)
File "dataflow_worker/executor.py", line 500, in dataflow_worker.executor.DoOperation.process (dataflow_worker/executor.c:14239)
self.dofn_receiver.receive(o)
File "apache_beam/runners/common.py", line 134, in apache_beam.runners.common.DoFnRunner.receive (apache_beam/runners/common.c:4172)
self.process(windowed_value)
File "apache_beam/runners/common.py", line 168, in apache_beam.runners.common.DoFnRunner.process (apache_beam/runners/common.c:5282)
self.reraise_augmented(exn)
File "apache_beam/runners/common.py", line 181, in apache_beam.runners.common.DoFnRunner.reraise_augmented (apache_beam/runners/common.c:5665)
raise
File "apache_beam/runners/common.py", line 166, in apache_beam.runners.common.DoFnRunner.process (apache_beam/runners/common.c:5218)
self._process_outputs(element, self.dofn_process(self.context))
File "apache_beam/runners/common.py", line 222, in apache_beam.runners.common.DoFnRunner._process_outputs (apache_beam/runners/common.c:6400)
self.main_receivers.receive(windowed_value)
File "dataflow_worker/executor.py", line 89, in dataflow_worker.executor.ConsumerSet.receive (dataflow_worker/executor.c:3487)
cython.cast(Operation, consumer).process(windowed_value)
File "dataflow_worker/executor.py", line 500, in dataflow_worker.executor.DoOperation.process (dataflow_worker/executor.c:14239)
self.dofn_receiver.receive(o)
File "apache_beam/runners/common.py", line 134, in apache_beam.runners.common.DoFnRunner.receive (apache_beam/runners/common.c:4172)
self.process(windowed_value)
File "apache_beam/runners/common.py", line 168, in apache_beam.runners.common.DoFnRunner.process (apache_beam/runners/common.c:5282)
self.reraise_augmented(exn)
File "apache_beam/runners/common.py", line 179, in apache_beam.runners.common.DoFnRunner.reraise_augmented (apache_beam/runners/common.c:5646)
raise type(exn), args, sys.exc_info()[2]
File "apache_beam/runners/common.py", line 166, in apache_beam.runners.common.DoFnRunner.process (apache_beam/runners/common.c:5218)
self._process_outputs(element, self.dofn_process(self.context))
File "apache_beam/runners/common.py", line 191, in apache_beam.runners.common.DoFnRunner._process_outputs (apache_beam/runners/common.c:5838)
for result in results:
File "trainer/preprocess.py", line 130, in process
KeyError: u"FALSE [while running 'Extract label ids']"
Job Logs - Workflow failed
(f3c7c09c0b6a453c): Workflow failed. Causes: (688819c5d32d79c8): S06:Read input+Parse input+Extract label ids+Read and convert to JPEG+Embed and make TFExample+Save to disk/Write to gs:__yelp_restaurant_photo_classification_yelp_restaurant_photo_classification_preproc_eval/Write/WriteImpl/write_bundles+Save to disk/Write to gs:__yelp_restaurant_photo_classification_yelp_restaurant_photo_classification_preproc_eval/Write/WriteImpl/pair+Save to disk/Write to gs:__yelp_restaurant_photo_classification_yelp_restaurant_photo_classification_preproc_eval/Write/WriteImpl/WindowInto+Save to disk/Write to gs:__yelp_restaurant_photo_classification_yelp_restaurant_photo_classification_preproc_eval/Write/WriteImpl/GroupByKey/Reify+Save to disk/Write to gs:__yelp_restaurant_photo_classification_yelp_restaurant_photo_classification_preproc_eval/Write/WriteImpl/GroupByKey/Write failed.
You probably have a row in your input CSV file where the label is 'FALSE', but 'FALSE' is not in 'dict.txt'.
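A quick way to confirm this (a sketch, assuming the dict.txt and eval_set.csv layouts shown above) is to scan the CSV for labels that are missing from the dictionary:

import csv

# Labels the preprocessing job knows about (one per line in dict.txt).
with open('dict.txt') as f:
    known = set(line.strip() for line in f if line.strip())

# Each row of eval_set.csv is: image URI followed by one or more labels.
with open('eval_set.csv') as f:
    for row in csv.reader(f):
        unknown = [label for label in row[1:] if label not in known]
        if unknown:
            print(row[0], unknown)  # a stray 'FALSE' value would show up here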
I am trying to install Scrapy 0.24 on a FreeBSD (MariaDB) system, but when I try to run it I get a "KeyError: 'z'", which I don't know the meaning of. I tried to debug it with no success.
File "/usr/local/bin/scrapy", line 9, in <module>
load_entry_point('Scrapy==0.24.4', 'console_scripts', 'scrapy')()
File "/usr/local/lib/python2.7/site-packages/scrapy/cmdline.py", line 143, in execute
_run_print_help(parser, _run_command, cmd, args, opts)
File "/usr/local/lib/python2.7/site-packages/scrapy/cmdline.py", line 89, in _run_print_help
func(*a, **kw)
File "/usr/local/lib/python2.7/site-packages/scrapy/cmdline.py", line 150, in _run_command
cmd.run(args, opts)
File "/usr/local/lib/python2.7/site-packages/scrapy/commands/crawl.py", line 60, in run
self.crawler_process.start()
File "/usr/local/lib/python2.7/site-packages/scrapy/crawler.py", line 92, in start
if self.start_crawling():
File "/usr/local/lib/python2.7/site-packages/scrapy/crawler.py", line 124, in start_crawling
return self._start_crawler() is not None
File "/usr/local/lib/python2.7/site-packages/scrapy/crawler.py", line 139, in _start_crawler
crawler.configure()
File "/usr/local/lib/python2.7/site-packages/scrapy/crawler.py", line 47, in configure
self.engine = ExecutionEngine(self, self._spider_closed)
File "/usr/local/lib/python2.7/site-packages/scrapy/core/engine.py", line 65, in __init__
self.scraper = Scraper(crawler)
File "/usr/local/lib/python2.7/site-packages/scrapy/core/scraper.py", line 66, in __init__
self.itemproc = itemproc_cls.from_crawler(crawler)
File "/usr/local/lib/python2.7/site-packages/scrapy/middleware.py", line 50, in from_crawler
return cls.from_settings(crawler.settings, crawler)
File "/usr/local/lib/python2.7/site-packages/scrapy/middleware.py", line 31, in from_settings
mw = mwcls.from_crawler(crawler)
File "/usr/local/lib/python2.7/site-packages/scrapy/contrib/pipeline/media.py", line 29, in from_crawler
pipe = cls.from_settings(crawler.settings)
File "/usr/local/lib/python2.7/site-packages/scrapy/contrib/pipeline/images.py", line 52, in from_settings
return cls(store_uri)
File "/usr/local/lib/python2.7/site-packages/scrapy/contrib/pipeline/files.py", line 150, in __init__
self.store = self._get_store(store_uri)
File "/usr/local/lib/python2.7/site-packages/scrapy/contrib/pipeline/files.py", line 170, in _get_store
store_cls = self.STORE_SCHEMES[scheme]
KeyError: 'z'
I'll also try installing Scrapy 0.22 on FreeBSD, just in case the version is the problem.
Thanks a lot!
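For reference, the traceback shows the failure in Scrapy's files pipeline, which picks a storage backend by the URL scheme it parses from the configured store URI (FILES_STORE / IMAGES_STORE). A one-letter scheme like 'z' usually means the setting value starts with something like 'z:'; a small illustration with hypothetical values:

from urlparse import urlparse  # Python 2, as in the traceback

# Scrapy looks up STORE_SCHEMES[scheme] for the scheme parsed from the store URI,
# so an unexpected scheme produces exactly this kind of KeyError.
print(urlparse('s3://bucket/images').scheme)   # 's3'
print(urlparse('/var/data/images').scheme)     # ''  (plain filesystem path)
print(urlparse('z:/some/dir/images').scheme)   # 'z' -> KeyError: 'z' when looked up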
Please tell me how to solve this.
I am trying to install and configure a Sentry service on CentOS, and when I try to start the Sentry service I get this error:
[root#felipeurrego ~]# source /var/www/sentry/bin/activate
(sentry)[root#felipeurrego ~]# sentry --config=/etc/sentry.conf.py start
Traceback (most recent call last):
File "/var/www/sentry/bin/sentry", line 8, in <module>
load_entry_point('sentry==5.4.5', 'console_scripts', 'sentry')()
File "/var/www/sentry/lib/python2.6/site-packages/sentry-5.4.5-py2.6.egg/sentry/utils/runner.py", line 197, in main
initializer=initialize_app,
File "/var/www/sentry/lib/python2.6/site-packages/logan-0.5.5-py2.6.egg/logan/runner.py", line 155, in run_app
management.execute_from_command_line([runner_name, command] + command_args)
File "/var/www/sentry/lib/python2.6/site-packages/Django-1.4.5-py2.6.egg/django/core/management/__init__.py", line 443, in execute_from_command_line
utility.execute()
File "/var/www/sentry/lib/python2.6/site-packages/Django-1.4.5-py2.6.egg/django/core/management/__init__.py", line 382, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/var/www/sentry/lib/python2.6/site-packages/Django-1.4.5-py2.6.egg/django/core/management/__init__.py", line 252, in fetch_command
app_name = get_commands()[subcommand]
File "/var/www/sentry/lib/python2.6/site-packages/Django-1.4.5-py2.6.egg/django/core/management/__init__.py", line 101, in get_commands
apps = settings.INSTALLED_APPS
File "/var/www/sentry/lib/python2.6/site-packages/Django-1.4.5-py2.6.egg/django/utils/functional.py", line 184, in inner
self._setup()
File "/var/www/sentry/lib/python2.6/site-packages/Django-1.4.5-py2.6.egg/django/conf/__init__.py", line 42, in _setup
self._wrapped = Settings(settings_module)
File "/var/www/sentry/lib/python2.6/site-packages/Django-1.4.5-py2.6.egg/django/conf/__init__.py", line 93, in __init__
mod = importlib.import_module(self.SETTINGS_MODULE)
File "/var/www/sentry/lib/python2.6/site-packages/Django-1.4.5-py2.6.egg/django/utils/importlib.py", line 35, in import_module
__import__(name)
File "/var/www/sentry/lib/python2.6/site-packages/logan-0.5.5-py2.6.egg/logan/importer.py", line 68, in load_module
return self._load_module(fullname)
File "/var/www/sentry/lib/python2.6/site-packages/logan-0.5.5-py2.6.egg/logan/importer.py", line 92, in _load_module
load_settings(self.config_path, allow_extras=self.allow_extras, settings=settings_mod)
File "/var/www/sentry/lib/python2.6/site-packages/logan-0.5.5-py2.6.egg/logan/settings.py", line 49, in load_settings
execfile(mod_or_filename, conf.__dict__)
django.core.exceptions.ImproperlyConfigured: IndentationError('unexpected indent', ('/etc/sentry.conf.py', 58, 1, " SENTRY_URL_PREFIX = 'http://logs.felipeurrego.com'\n"))
And that's all.
It's very clear:
django.core.exceptions.ImproperlyConfigured: IndentationError('unexpected indent', ('/etc/sentry.conf.py', 58, 1, " SENTRY_URL_PREFIX = 'http://logs.felipeurrego.com'\n"))
You have an unwanted space before the configuration variable SENTRY_URL_PREFIX in your /etc/sentry.conf.py. Python requires consistent indentation, and a top-level assignment in a source file must not be indented.
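Removing the leading whitespace on that line (line 58 of the config, according to the error) should fix it; the assignment must start at column 0, for example:

# /etc/sentry.conf.py
# Broken -- the leading space triggers the IndentationError:
#     SENTRY_URL_PREFIX = 'http://logs.felipeurrego.com'
# Fixed -- the assignment starts at column 0:
SENTRY_URL_PREFIX = 'http://logs.felipeurrego.com'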