My application works properly on my local machine. However, when I deployed it to Elastic Beanstalk, importing the librosa library broke the application. How can I solve this issue?
import os
import pandas as pd
import librosa
import numpy as np
from sklearn.preprocessing import StandardScaler
# split training and test data
from sklearn.model_selection import train_test_split
from flask import Flask
from flask import request
from flask import Response

# EB looks for an 'application' callable by default.
application = Flask(__name__)

@application.route('/', methods=['GET'])
def homepage():
    return "Hello World"

# run the app.
if __name__ == "__main__":
    # Setting debug to True enables debug output. This line should be
    # removed before deploying a production app.
    application.debug = True
    application.run()
I am using Python 3.8 on Amazon Linux 2. The packages are installed using requirements.txt:
flask==1.1.2
flask_cors==3.0.10
imageio==2.9.0
librosa==0.8.1
moviepy==1.0.3
numpy==1.19.0
pandas==1.2.3
scikit-learn==0.24.1
tensorflow==2.2.0
werkzeug==1.0.1
The log is given below:
2022/02/11 20:22:31.051794 [ERROR] An error occurred during execution of command [app-deploy] - [InstallDependency]. Stop running the command. Error: fail to install dependencies with requirements.txt file with error Command /bin/sh -c /var/app/venv/staging-LQM1lest/bin/pip install -r requirements.txt failed with error exit status 2. Stderr:ERROR: Exception:
Traceback (most recent call last):
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_internal/cli/base_command.py", line 164, in exc_logging_wrapper
status = run_func(*args)
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_internal/cli/req_command.py", line 205, in wrapper
return func(self, options, args)
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_internal/commands/install.py", line 339, in run
reqs, check_supported_wheels=not options.target_dir
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_internal/resolution/resolvelib/resolver.py", line 93, in resolve
collected.requirements, max_rounds=try_to_avoid_resolution_too_deep
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_vendor/resolvelib/resolvers.py", line 482, in resolve
state = resolution.resolve(requirements, max_rounds=max_rounds)
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_vendor/resolvelib/resolvers.py", line 349, in resolve
self._add_to_criteria(self.state.criteria, r, parent=None)
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_vendor/resolvelib/resolvers.py", line 173, in _add_to_criteria
if not criterion.candidates:
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_vendor/resolvelib/structs.py", line 151, in __bool__
return bool(self._sequence)
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py", line 155, in __bool__
return any(self)
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py", line 143, in <genexpr>
return (c for c in iterator if id(c) not in self._incompatible_ids)
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py", line 47, in _iter_built
candidate = func()
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_internal/resolution/resolvelib/factory.py", line 206, in _make_candidate_from_link
version=version,
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_internal/resolution/resolvelib/candidates.py", line 287, in __init__
version=version,
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_internal/resolution/resolvelib/candidates.py", line 156, in __init__
self.dist = self._prepare()
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_internal/resolution/resolvelib/candidates.py", line 225, in _prepare
dist = self._prepare_distribution()
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_internal/resolution/resolvelib/candidates.py", line 292, in _prepare_distribution
return preparer.prepare_linked_requirement(self._ireq, parallel_builds=True)
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_internal/operations/prepare.py", line 482, in prepare_linked_requirement
return self._prepare_linked_requirement(req, parallel_builds)
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_internal/operations/prepare.py", line 528, in _prepare_linked_requirement
link, req.source_dir, self._download, self.download_dir, hashes
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_internal/operations/prepare.py", line 217, in unpack_url
hashes=hashes,
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_internal/operations/prepare.py", line 94, in get_http_url
from_path, content_type = download(link, temp_dir.path)
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_internal/network/download.py", line 145, in __call__
for chunk in chunks:
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_internal/cli/progress_bars.py", line 144, in iter
for x in it:
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_internal/network/utils.py", line 87, in response_chunks
decode_content=False,
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_vendor/urllib3/response.py", line 576, in stream
data = self.read(amt=amt, decode_content=decode_content)
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_vendor/urllib3/response.py", line 519, in read
data = self._fp.read(amt) if not fp_closed else b""
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_vendor/cachecontrol/filewrapper.py", line 65, in read
self._close()
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_vendor/cachecontrol/filewrapper.py", line 52, in _close
self.__callback(self.__buf.getvalue())
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_vendor/cachecontrol/controller.py", line 309, in cache_response
cache_url, self.serializer.dumps(request, response, body=body)
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_vendor/cachecontrol/serialize.py", line 72, in dumps
return b",".join([b"cc=4", msgpack.dumps(data, use_bin_type=True)])
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_vendor/msgpack/__init__.py", line 35, in packb
return Packer(**kwargs).pack(o)
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_vendor/msgpack/fallback.py", line 960, in pack
self._pack(obj)
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_vendor/msgpack/fallback.py", line 944, in _pack
len(obj), dict_iteritems(obj), nest_limit - 1
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_vendor/msgpack/fallback.py", line 1045, in _pack_map_pairs
self._pack(v, nest_limit - 1)
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_vendor/msgpack/fallback.py", line 944, in _pack
len(obj), dict_iteritems(obj), nest_limit - 1
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_vendor/msgpack/fallback.py", line 1045, in _pack_map_pairs
self._pack(v, nest_limit - 1)
File "/var/app/venv/staging-LQM1lest/lib/python3.7/site-packages/pip/_vendor/msgpack/fallback.py", line 889, in _pack
return self._buffer.write(obj)
Can anyone please confirm if the application works on EB?
The issue is probably with tensorflow==2.2.0. It is a very heavy library, and you can't install it on a t2.micro instance. You need at least a t2.medium (not in the free tier), which has enough RAM to install tensorflow==2.2.0 successfully on EB.
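If you want to stay on Elastic Beanstalk, one way to request a larger instance type is an .ebextensions option-settings file in your source bundle. This is only a minimal sketch; the file name is arbitrary, and t2.medium is simply the smallest size suggested above:

# .ebextensions/instance.config (file name is an example)
option_settings:
  aws:autoscaling:launchconfiguration:
    InstanceType: t2.medium

After adding the file, redeploy the application so the environment is updated with the new instance type.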
Related
I am doing hyperparameter tuning on GCP using this scikit Docker image. When I add the aiplatform package as a dependency, things break. The error comes from the bigquery import:
from google.cloud import bigquery
The error message is below.
The replica workerpool0-0 exited with a non-zero status of 1.
Traceback (most recent call last):
[...]
File "/root/.local/lib/python3.7/site-packages/trainer/task.py", line 7, in
from google.cloud import storage, bigquery
File "/usr/local/lib/python3.7/dist-packages/google/cloud/bigquery/__init__.py", line 35, in
from google.cloud.bigquery.client import Client
File "/usr/local/lib/python3.7/dist-packages/google/cloud/bigquery/client.py", line 60, in
from google.cloud.bigquery import _pandas_helpers
File "/usr/local/lib/python3.7/dist-packages/google/cloud/bigquery/_pandas_helpers.py", line 40, in
from google.cloud.bigquery import schema
File "/usr/local/lib/python3.7/dist-packages/google/cloud/bigquery/schema.py", line 19, in
from google.cloud.bigquery_v2 import types
File "/usr/local/lib/python3.7/dist-packages/google/cloud/bigquery_v2/__init__.py", line 23, in
from google.cloud.bigquery_v2 import types
File "/usr/local/lib/python3.7/dist-packages/google/cloud/bigquery_v2/types.py", line 23, in
from google.cloud.bigquery_v2.proto import encryption_config_pb2
File "/usr/local/lib/python3.7/dist-packages/google/cloud/bigquery_v2/proto/encryption_config_pb2.py", line 64, in
file=DESCRIPTOR,
File "/root/.local/lib/python3.7/site-packages/google/protobuf/descriptor.py", line 560, in __new__
_message.Message._CheckCalledFromGeneratedFile()
TypeError: Descriptors cannot not be created directly.
If this call came from a _pb2.py file, your generated code is out of date and must be regenerated with protoc >= 3.19.0.
If you cannot immediately regenerate your protos, some other possible workarounds are:
1. Downgrade the protobuf package to 3.20.x or lower.
2. Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python (but this will use pure-Python parsing and will be much slower).
From the logs, I can see the system is downloading google-cloud-aiplatform v1.17.0. According to the scikit Docker image, google-cloud-storage v1.35.0 is installed, but google-cloud-aiplatform pulls in v2.5.0.
I am thinking I need to downgrade google-cloud-aiplatform to a specific version. Does anyone know which version, or how else to resolve this problem?
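Based on the workarounds printed in the error message, one thing I plan to try is pinning protobuf in the trainer's requirements. This is only a sketch, and the exact pins are my own guess rather than something verified against this Docker image:

# requirements.txt for the trainer package (pins below are a guess)
google-cloud-aiplatform
protobuf<3.21   # per the error message: "downgrade the protobuf package to 3.20.x or lower"
# alternatively, set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python in the training environment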
UPDATE: FWIW, if I downgrade to google-cloud-aiplatform==1.15.1, the problem above goes away. However, the problem below appears instead.
Traceback (most recent call last):
File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/root/.local/lib/python3.7/site-packages/trainer/hpt.py", line 170, in
staging_bucket=f'{args.bucket_uri}'
File "/root/.local/lib/python3.7/site-packages/google/cloud/aiplatform/initializer.py", line 138, in init
backing_tensorboard=experiment_tensorboard,
File "/root/.local/lib/python3.7/site-packages/google/cloud/aiplatform/metadata/metadata.py", line 235, in set_experiment
experiment_name=experiment, description=description
File "/root/.local/lib/python3.7/site-packages/google/cloud/aiplatform/metadata/experiment_resources.py", line 247, in get_or_create
project=project, location=location, credentials=credentials
File "/root/.local/lib/python3.7/site-packages/google/cloud/aiplatform/metadata/metadata_store.py", line 283, in ensure_default_metadata_store_exists
encryption_spec_key_name=encryption_key_spec_name,
File "/root/.local/lib/python3.7/site-packages/google/cloud/aiplatform/metadata/metadata_store.py", line 123, in get_or_create
credentials=credentials,
File "/root/.local/lib/python3.7/site-packages/google/cloud/aiplatform/metadata/metadata_store.py", line 241, in _get
credentials=credentials,
File "/root/.local/lib/python3.7/site-packages/google/cloud/aiplatform/metadata/metadata_store.py", line 73, in __init__
self._gca_resource = self._get_gca_resource(resource_name=metadata_store_name)
File "/root/.local/lib/python3.7/site-packages/google/cloud/aiplatform/base.py", line 617, in _get_gca_resource
return getattr(self.api_client, self._getter_method)(
File "/root/.local/lib/python3.7/site-packages/google/cloud/aiplatform/utils/__init__.py", line 425, in __getattr__
return getattr(self._clients[self._default_version], name)
File "/root/.local/lib/python3.7/site-packages/google/cloud/aiplatform/utils/__init__.py", line 359, in __getattr__
client_info=self._client_info,
File "/root/.local/lib/python3.7/site-packages/google/cloud/aiplatform_v1/services/metadata_service/client.py", line 547, in __init__
api_audience=client_options.api_audience,
File "/root/.local/lib/python3.7/site-packages/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py", line 190, in __init__
("grpc.max_receive_message_length", -1),
File "/root/.local/lib/python3.7/site-packages/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py", line 241, in create_channel
**kwargs,
File "/root/.local/lib/python3.7/site-packages/google/api_core/grpc_helpers.py", line 318, in create_channel
default_host=default_host,
File "/root/.local/lib/python3.7/site-packages/google/api_core/grpc_helpers.py", line 239, in _create_composite_credentials
credentials, scopes=scopes, default_scopes=default_scopes
TypeError: with_scopes_if_required() got an unexpected keyword argument 'default_scopes'
I am trying to run pytest with allure on my Docker image, but it reports an "invalid syntax" error. Is there a Python version requirement for allure? The Python version on my Docker image is 2.7.13.
Can anyone help me?
root@ubuntu:/fuego-rw/buildzone# pytest allure_title.py
Traceback (most recent call last):
File "/usr/local/bin/pytest", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python2.7/dist-packages/_pytest/config/__init__.py", line 65, in main
config = _prepareconfig(args, plugins)
File "/usr/local/lib/python2.7/dist-packages/_pytest/config/__init__.py", line 214, in _prepareconfig
pluginmanager=pluginmanager, args=args
File "/usr/local/lib/python2.7/dist-packages/pluggy/hooks.py", line 286, in __call__
return self._hookexec(self, self.get_hookimpls(), kwargs)
File "/usr/local/lib/python2.7/dist-packages/pluggy/manager.py", line 93, in _hookexec
return self._inner_hookexec(hook, methods, kwargs)
File "/usr/local/lib/python2.7/dist-packages/pluggy/manager.py", line 87, in <lambda>
firstresult=hook.spec.opts.get("firstresult") if hook.spec else False,
File "/usr/local/lib/python2.7/dist-packages/pluggy/callers.py", line 203, in _multicall
gen.send(outcome)
File "/usr/local/lib/python2.7/dist-packages/_pytest/helpconfig.py", line 94, in pytest_cmdline_parse
config = outcome.get_result()
File "/usr/local/lib/python2.7/dist-packages/pluggy/callers.py", line 81, in get_result
_reraise(*ex) # noqa
File "/usr/local/lib/python2.7/dist-packages/pluggy/callers.py", line 187, in _multicall
res = hook_impl.function(*args)
File "/usr/local/lib/python2.7/dist-packages/_pytest/config/__init__.py", line 789, in pytest_cmdline_parse
self.parse(args)
File "/usr/local/lib/python2.7/dist-packages/_pytest/config/__init__.py", line 997, in parse
self._preparse(args, addopts=addopts)
File "/usr/local/lib/python2.7/dist-packages/_pytest/config/__init__.py", line 943, in _preparse
self.pluginmanager.load_setuptools_entrypoints("pytest11")
File "/usr/local/lib/python2.7/dist-packages/pluggy/manager.py", line 299, in load_setuptools_entrypoints
plugin = ep.load()
File "/usr/local/lib/python2.7/dist-packages/importlib_metadata/__init__.py", line 105, in load
module = import_module(match.group('module'))
File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/usr/local/lib/python2.7/dist-packages/_pytest/assertion/rewrite.py", line 304, in load_module
exec(co, mod.__dict__)
File "/usr/local/lib/python2.7/dist-packages/allure_pytest/plugin.py", line 3, in <module>
import allure
File "/usr/local/lib/python2.7/dist-packages/allure.py", line 1, in <module>
from allure_commons._allure import title
File "/usr/local/lib/python2.7/dist-packages/allure_commons/__init__.py", line 3, in <module>
from allure_commons._allure import fixture # noqa: F401
File "/usr/local/lib/python2.7/dist-packages/allure_commons/_allure.py", line 165
def __call__(self, func: _TFunc) -> _TFunc:
^
SyntaxError: invalid syntax
This syntax:
func: _TFunc
is called a type hint, meaning the variable func is expected to be of type _TFunc (in simpler terms, num: int means the variable num is expected to be an int).
Function annotations were only introduced in Python 3.0 (PEP 3107) and type hints in Python 3.5 (PEP 484), so they are not available in the version you are using, Python 2.7.
def func(num: int):
    print(num)

func(1)

Using Python 2:

  File "Main.py", line 1
    def func(num: int):
                ^
SyntaxError: invalid syntax

Using Python 3:

1
Either upgrade your Python version to >=3.5 or use an older version of allure-pytest. I would advise upgrading Python, as that is the more sustainable option.
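If you go the upgrade route, a minimal sketch of a Dockerfile for the test image could look like the following. The base image, versions, and output directory are assumptions rather than details from your setup:

# Hypothetical Dockerfile for running the tests on Python 3
FROM python:3.8-slim

WORKDIR /tests
COPY allure_title.py .

# Current allure-pytest releases target Python 3, so install them on a Python 3 base image
RUN pip install --no-cache-dir pytest allure-pytest

CMD ["pytest", "allure_title.py", "--alluredir=/tests/allure-results"]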
Could you please help me solve this problem?
I have installed Python 2.7, Selenium2Library, wxPython, and Robot Framework...
When I try to start RIDE, I get the following error:
:~$ ride.py
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/robotide/__init__.py", line 74, in main
_run(inpath, not noupdatecheck, debug_console)
File "/usr/local/lib/python2.7/dist-packages/robotide/__init__.py", line 100, in _run
ride = RIDE(inpath, updatecheck)
File "/usr/local/lib/python2.7/dist-packages/robotide/application/application.py", line 42, in __init__
wx.App.__init__(self, redirect=False)
File "/usr/lib/python2.7/dist-packages/wx-3.0-gtk2/wx/_core.py", line 8628, in __init__
self._BootstrapApp()
File "/usr/lib/python2.7/dist-packages/wx-3.0-gtk2/wx/_core.py", line 8196, in _BootstrapApp
return _core_.PyApp__BootstrapApp(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/robotide/application/application.py", line 47, in OnInit
self.settings = RideSettings()
File "/usr/local/lib/python2.7/dist-packages/robotide/preferences/settings.py", line 316, in __init__
user_path = initialize_settings(default_path)
File "/usr/local/lib/python2.7/dist-packages/robotide/preferences/settings.py", line 29, in initialize_settings
SETTINGS_DIRECTORY, path, dest_file_name)
File "/usr/local/lib/python2.7/dist-packages/robotide/preferences/settings.py", line 46, in _copy_or_migrate_user_settings
SettingsMigrator(source_path, settings_path).migrate()
File "/usr/local/lib/python2.7/dist-packages/robotide/preferences/settings.py", line 94, in migrate
self.merge()
File "/usr/local/lib/python2.7/dist-packages/robotide/preferences/settings.py", line 98, in merge
self._write_merged_settings(self._default_settings, self._user_path)
File "/usr/local/lib/python2.7/dist-packages/robotide/preferences/settings.py", line 186, in _write_merged_settings
'Could not open settings file "%s" for writing' % path)
RuntimeError: Could not open settings file "/home/said/.robotframework/ride/settings.cfg" for writing
The error message is clear:
RuntimeError: Could not open settings file "/home/said/.robotframework/ride/settings.cfg" for writing
Possible solutions:
Remove the existing settings directory so RIDE can recreate it:
cd ; pwd
/home/said
rm -rf .robotframework
or fix its ownership so your user can write to it:
sudo chown -R said:said .robotframework
I have managed to royally screw myself over by doing something that seemed innocuous.
I was getting the following error from my Python script (brand['feed'] is the URL I'm making the request to):
C:\Python27\lib\site-packages\requests\packages\urllib3\util\ssl_.py:90: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning.
InsecurePlatformWarning
Traceback (most recent call last):
File "D:\Phocas\Phocas-Automation\analytics\download_feed_cats.py", line 18, in <module>
data = requests.get(brand['feed'])
File "C:\Python27\lib\site-packages\requests\api.py", line 69, in get
return request('get', url, params=params, **kwargs)
File "C:\Python27\lib\site-packages\requests\api.py", line 50, in request
response = session.request(method=method, url=url, **kwargs)
File "C:\Python27\lib\site-packages\requests\sessions.py", line 465, in request
resp = self.send(prep, **send_kwargs)
File "C:\Python27\lib\site-packages\requests\sessions.py", line 594, in send
history = [resp for resp in gen] if allow_redirects else []
File "C:\Python27\lib\site-packages\requests\sessions.py", line 196, in resolve_redirects
**adapter_kwargs
File "C:\Python27\lib\site-packages\requests\sessions.py", line 573, in send
r = adapter.send(request, **kwargs)
File "C:\Python27\lib\site-packages\requests\adapters.py", line 431, in send
raise SSLError(e, request=request)
requests.exceptions.SSLError: [Errno 1] _ssl.c:507: error:14090086:SSL routines:SSL3_GET_SERVER_CERTIFICATE:certificate verify failed
So I asked the internet, and the internet said to do this:
pip install --upgrade ndg-httpsclient
So I did that, and now I keep getting a RuntimeError when I run the same script. What's crazy is that even if I just try to run pip in the shell, I get the same error!
Phocas_Tommy#p3303386 MINGW64 /c/Python27
$ pip
Traceback (most recent call last):
File "C:\Python27\lib\runpy.py", line 162, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "C:\Python27\lib\runpy.py", line 72, in _run_code
exec code in run_globals
File "C:\Python27\Scripts\pip.exe\__main__.py", line 5, in <module>
File "C:\Python27\lib\site-packages\pip\__init__.py", line 11, in <module>
from pip.vcs import git, mercurial, subversion, bazaar # noqa
File "C:\Python27\lib\site-packages\pip\vcs\mercurial.py", line 9, in <module>
from pip.download import path_to_url
File "C:\Python27\lib\site-packages\pip\download.py", line 22, in <module>
from pip._vendor import requests, six
File "C:\Python27\lib\site-packages\pip\_vendor\requests\__init__.py", line 53, in <module>
from .packages.urllib3.contrib import pyopenssl
File "C:\Python27\lib\site-packages\pip\_vendor\requests\packages\urllib3\contrib\pyopenssl.py", line 43, in <module>
import OpenSSL.SSL
File "C:\Python27\lib\site-packages\OpenSSL\__init__.py", line 8, in <module>
from OpenSSL import rand, crypto, SSL
File "C:\Python27\lib\site-packages\OpenSSL\rand.py", line 12, in <module>
from OpenSSL._util import (
File "C:\Python27\lib\site-packages\OpenSSL\_util.py", line 9, in <module>
binding = Binding()
File "C:\Python27\lib\site-packages\cryptography\hazmat\bindings\openssl\binding.py", line 114, in __init__
self._ensure_ffi_initialized()
File "C:\Python27\lib\site-packages\cryptography\hazmat\bindings\openssl\binding.py", line 126, in _ensure_ffi_initialized
cls._modules,
File "C:\Python27\lib\site-packages\cryptography\hazmat\bindings\utils.py", line 31, in load_library_for_binding
lib = ffi.verifier.load_library()
File "C:\Python27\lib\site-packages\cffi\verifier.py", line 96, in load_library
self._compile_module()
File "C:\Python27\lib\site-packages\cryptography\hazmat\bindings\utils.py", line 127, in _compile_module
"Attempted implicit compile of a cffi module. All cffi modules should "
RuntimeError: Attempted implicit compile of a cffi module. All cffi modules should be pre-compiled at installation time.
I have searched for this error and can't seem to find anything that helps. I'm using Python 2.7.6 on Windows Server 2008 R2 Standard 64-bit.
Re-installing Python fixed this problem.
I am trying to set up the dynamic thumbnail service thumbor, and to support S3 as storage I need to set up this community-powered pip library for AWS.
It works well in my local environment, but when I try to host it on one of our servers, I get a NoCredentialsError. I am assuming this is because of differing versions of botocore (the latest one versus the one installed by the pip library). Here is the error log:
File "/usr/local/lib/python2.7/dist-packages/botocore/session.py", line 774, in get_component
# client config from the session
File "/usr/local/lib/python2.7/dist-packages/botocore/session.py", line 174, in <lambda>
self._components.lazy_register_component(
File "/usr/local/lib/python2.7/dist-packages/botocore/session.py", line 453, in get_data
- agent_version is the value of the `user_agent_version`
File "/usr/local/lib/python2.7/dist-packages/botocore/loaders.py", line 119, in _wrapper
data = func(self, *args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/botocore/loaders.py", line 364, in load_data
DataNotFoundError: Unable to load data for: _endpoints
2016-04-24 12:14:34 tornado.application:ERROR Future exception was never retrieved: Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/tornado/gen.py", line 230, in wrapper
yielded = next(result)
File "/usr/local/lib/python2.7/dist-packages/thumbor/handlers/imaging.py", line 31, in check_image
exists = yield gen.maybe_future(self.context.modules.storage.exists(kw['image'][:self.context.config.MAX_ID_LENGTH]))
File "/usr/local/lib/python2.7/dist-packages/tornado/concurrent.py", line 455, in wrapper
future.result()
File "/usr/local/lib/python2.7/dist-packages/tornado/concurrent.py", line 215, in result
raise_exc_info(self._exc_info)
File "/usr/local/lib/python2.7/dist-packages/tornado/concurrent.py", line 443, in wrapper
result = f(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/tc_aws/aws/storage.py", line 107, in exists
self.storage.get(file_abspath, callback=return_data)
File "/usr/local/lib/python2.7/dist-packages/tornado/concurrent.py", line 455, in wrapper
future.result()
File "/usr/local/lib/python2.7/dist-packages/tornado/concurrent.py", line 215, in result
raise_exc_info(self._exc_info)
File "/usr/local/lib/python2.7/dist-packages/tornado/concurrent.py", line 443, in wrapper
result = f(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/tc_aws/aws/bucket.py", line 44, in get
Key=self._clean_key(path),
File "/usr/local/lib/python2.7/dist-packages/tornado_botocore/base.py", line 97, in call
return self._make_api_call(operation_name=self.operation, api_params=kwargs, callback=callback)
File "/usr/local/lib/python2.7/dist-packages/tornado_botocore/base.py", line 60, in _make_api_call
operation_model=operation_model, request_dict=request_dict, callback=callback)
File "/usr/local/lib/python2.7/dist-packages/tornado_botocore/base.py", line 54, in _make_request
request_dict=request_dict, operation_model=operation_model, callback=callback)
File "/usr/local/lib/python2.7/dist-packages/tornado_botocore/base.py", line 32, in _send_request
request = self.endpoint.create_request(request_dict, operation_model)
File "/usr/local/lib/python2.7/dist-packages/botocore/endpoint.py", line 126, in create_request
operation_name=operation_model.name)
File "/usr/local/lib/python2.7/dist-packages/botocore/hooks.py", line 226, in emit
return self._emit(event_name, kwargs)
File "/usr/local/lib/python2.7/dist-packages/botocore/hooks.py", line 209, in _emit
response = handler(**kwargs)
File "/usr/local/lib/python2.7/dist-packages/botocore/signers.py", line 90, in handler
return self.sign(operation_name, request)
File "/usr/local/lib/python2.7/dist-packages/botocore/signers.py", line 124, in sign
signer.add_auth(request=request)
File "/usr/local/lib/python2.7/dist-packages/botocore/auth.py", line 626, in add_auth
raise NoCredentialsError
NoCredentialsError: Unable to locate credentials
Could it be fixed by changing the order in which I install the libraries? The pip library removes the existing, newer version of botocore and installs an older one.
EDIT:
I am running the processes with supervisor, and it seems the process can't access the AWS credentials.
EDIT 2:
The issue was resolved with proper configuration of supervisor. The user for the process started by supervisor did not have access to the config file.
The issue was resolved by configuring supervisor properly. The user for the subprocess started by supervisor did not have access to the AWS config file, which is why it worked in the local environment, or when the process was started separately, but not under supervisor.
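For anyone hitting the same thing, a minimal sketch of the relevant supervisor program section is below. The program name, user, and paths are assumptions rather than details from the original setup; the point is that user= (and, if needed, environment=) must resolve to an account that can actually read the AWS credentials/config files:

; /etc/supervisor/conf.d/thumbor.conf (hypothetical names and paths)
[program:thumbor]
command=/usr/local/bin/thumbor --port=8888 --conf=/etc/thumbor.conf
; run as the user that owns the ~/.aws files, or point botocore at them explicitly
user=ubuntu
environment=HOME="/home/ubuntu",AWS_SHARED_CREDENTIALS_FILE="/home/ubuntu/.aws/credentials"
autostart=true
autorestart=true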