I would like to provide a user with different default file names based on the wildcard that they select.
It seems that pyface.FileDialog inherits from HasTraits, so I should be able to observe its wildcard_index trait to notice the change and update the default_filename trait.
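For reference, this is the general static-notification pattern I am expecting to work, shown as a minimal, self-contained sketch on a plain HasTraits class (not the dialog itself):
from traits.api import HasTraits, Int, Str, on_trait_change

class Example(HasTraits):
    index = Int(0)
    name = Str('filename_john')

    @on_trait_change('index')
    def _update_name(self, new):
        # Fired whenever `index` is assigned a new value.
        self.name = ['filename_john', 'filename_paul',
                     'filename_george', 'filename_ringo'][new]

ex = Example()
ex.index = 2
print(ex.name)  # filename_george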
Here are my versions,
import pyface, traits, traitsui
pyface.__version__, traits.__version__, traitsui.__version__
('6.1.2', '5.1.2', '6.1.3')
EDM Python environment
import sys
sys.version
'2.7.15 |Enthought, Inc. (x86_64)| (default, Jun 21 2018, 22:10:16) [MSC v.1500 64 bit (AMD64)]'
Using the WX backend
import wx
wx.version()
'3.0.2.0 msw (classic)'
Here is the simplest possible demo of the problem:
from pyface.api import FileDialog
from traits.api import on_trait_change

class MyFileDialog(FileDialog):
    """ Subclass that allows the suggested file name to change based on the wildcard type.
    """

    @on_trait_change('wildcard_index')
    def on_wildcard_changed(self, idx):
        # This is never called
        self.default_filename = [
            'filename_john',
            'filename_paul',
            'filename_george',
            'filename_ringo'][idx]

if __name__ == '__main__':
    types = ["*.a", "*.b", "*.c", "*.d"]
    dialog = MyFileDialog(
        action="save as",
        wildcard="|".join(["%s|%s" % (t, t) for t in types]),
    )
    dialog.open()
I suggest that you post this question to the ets-users Google group (for readers not familiar with it, it is at: https://groups.google.com/forum/#!forum/ets-users).
I want to use SFTPToGCSOperator in a Cloud Composer environment (1.10.6) on GCP. I know there is a limitation: the operator is present only in the latest version of Airflow, not in Composer's latest version, 1.10.6.
See the reference:
https://airflow.readthedocs.io/en/latest/howto/operator/gcp/sftp_to_gcs.html
I found an alternative to the operator and created a plugin class, but then I faced an issue with the SFTPHook class, so I am now using the older version of the SFTPHook class.
See the reference below:
from airflow.contrib.hooks.sftp_hook import SFTPHook
https://airflow.apache.org/docs/stable/_modules/airflow/contrib/hooks/sftp_hook.html
I created a plugin class and later imported it into my DAG script. It works fine only when we are moving one file; in that case we need to pass the complete file path with the extension.
Please refer to the example below (it works fine in this scenario):
DIR = "/test/sftp_dag_test/source_dir"
OBJECT_SRC_1 = "file.csv"
source_path=os.path.join(DIR, OBJECT_SRC_1),
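For completeness, this is roughly how that single-file path is passed to the operator (the task_id, connection IDs and bucket here are the same placeholder values used in the DAG below, nothing special):
copy_single_file = SFTPToGCSOperator(
    task_id="t_sftp_to_gcs_single",
    sftp_conn_id="test_sftp_conn",
    gcp_conn_id="google_cloud_default",
    source_path=os.path.join(DIR, OBJECT_SRC_1),  # complete path including extension
    destination_bucket="test-gcp-bucket",
)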
But if we use a wildcard, i.e. if we want to move all the files from a directory, I get an error from the get_tree_map method.
Please see the DAG code below:
import os
from airflow import models
from airflow.models import Variable
from PluginSFTPToGCSOperator import SFTPToGCSOperator
#from airflow.contrib.operators.sftp_to_gcs import SFTPToGCSOperator
from airflow.utils.dates import days_ago
default_args = {"start_date": days_ago(1)}
DIR_path = "/main_dir/sub_dir/"
BUCKET_SRC = "test-gcp-bucket"
with models.DAG(
    "dag_sftp_to_gcs", default_args=default_args, schedule_interval=None
) as dag:
    copy_sftp_to_gcs = SFTPToGCSOperator(
        task_id="t_sftp_to_gcs",
        sftp_conn_id="test_sftp_conn",
        gcp_conn_id="google_cloud_default",
        source_path=os.path.join(DIR_path, "*.gz"),
        destination_bucket=BUCKET_SRC,
    )

    copy_sftp_to_gcs
Here we are using the wildcard * in the DAG script. Please see the plugin class below:
import os
from tempfile import NamedTemporaryFile
from typing import Optional, Union

from airflow.plugins_manager import AirflowPlugin
from airflow import AirflowException
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.contrib.hooks.sftp_hook import SFTPHook
from airflow.utils.decorators import apply_defaults

WILDCARD = "*"

class SFTPToGCSOperator(BaseOperator):
    template_fields = ("source_path", "destination_path", "destination_bucket")

    @apply_defaults
    def __init__(
        self,
        source_path: str,
        destination_bucket: str = "destination_bucket",
        destination_path: Optional[str] = None,
        gcp_conn_id: str = "google_cloud_default",
        sftp_conn_id: str = "sftp_conn_plugin",
        delegate_to: Optional[str] = None,
        mime_type: str = "application/octet-stream",
        gzip: bool = False,
        move_object: bool = False,
        *args,
        **kwargs
    ) -> None:
        super().__init__(*args, **kwargs)
        self.source_path = source_path
        self.destination_path = self._set_destination_path(destination_path)
        print('destination_bucket : ', destination_bucket)
        self.destination_bucket = destination_bucket
        self.gcp_conn_id = gcp_conn_id
        self.mime_type = mime_type
        self.delegate_to = delegate_to
        self.gzip = gzip
        self.sftp_conn_id = sftp_conn_id
        self.move_object = move_object

    def execute(self, context):
        print("inside execute")
        gcs_hook = GoogleCloudStorageHook(
            google_cloud_storage_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to
        )
        sftp_hook = SFTPHook(self.sftp_conn_id)

        if WILDCARD in self.source_path:
            total_wildcards = self.source_path.count(WILDCARD)
            if total_wildcards > 1:
                raise AirflowException(
                    "Only one wildcard '*' is allowed in source_path parameter. "
                    "Found {} in {}.".format(total_wildcards, self.source_path)
                )
            print('self.source_path : ', self.source_path)
            prefix, delimiter = self.source_path.split(WILDCARD, 1)
            print('prefix : ', prefix)
            base_path = os.path.dirname(prefix)
            print('base_path : ', base_path)
            files, _, _ = sftp_hook.get_tree_map(
                base_path, prefix=prefix, delimiter=delimiter
            )
            for file in files:
                destination_path = file.replace(base_path, self.destination_path, 1)
                self._copy_single_object(gcs_hook, sftp_hook, file, destination_path)
        else:
            destination_object = (
                self.destination_path
                if self.destination_path
                else self.source_path.rsplit("/", 1)[1]
            )
            self._copy_single_object(
                gcs_hook, sftp_hook, self.source_path, destination_object
            )

    def _copy_single_object(
        self,
        gcs_hook: GoogleCloudStorageHook,
        sftp_hook: SFTPHook,
        source_path: str,
        destination_object: str,
    ) -> None:
        """
        Helper function to copy single object.
        """
        self.log.info(
            "Executing copy of %s to gs://%s/%s",
            source_path,
            self.destination_bucket,
            destination_object,
        )
        with NamedTemporaryFile("w") as tmp:
            sftp_hook.retrieve_file(source_path, tmp.name)
            print('before upload self det object : ', self.destination_bucket)
            gcs_hook.upload(
                self.destination_bucket,
                destination_object,
                tmp.name,
                self.mime_type,
            )

        if self.move_object:
            self.log.info("Executing delete of %s", source_path)
            sftp_hook.delete_file(source_path)

    @staticmethod
    def _set_destination_path(path: Union[str, None]) -> str:
        if path is not None:
            return path.lstrip("/") if path.startswith("/") else path
        return ""

    @staticmethod
    def _set_bucket_name(name: str) -> str:
        bucket = name if not name.startswith("gs://") else name[5:]
        return bucket.strip("/")

class SFTPToGCSOperatorPlugin(AirflowPlugin):
    name = "SFTPToGCSOperatorPlugin"
    operators = [SFTPToGCSOperator]
I import this plugin class in my DAG script, and it works fine when we pass a file name, because the code goes into the else branch.
But when we use a wildcard, the code goes into the if branch, and I get an error from the get_tree_map method.
See the error below:
ERROR - 'SFTPHook' object has no attribute 'get_tree_map'
I found the reason for this error: the method itself is not present in Composer (Airflow 1.10.6):
https://airflow.apache.org/docs/stable/_modules/airflow/contrib/hooks/sftp_hook.html
The method is present in the latest version of Airflow:
https://airflow.readthedocs.io/en/latest/_modules/airflow/providers/sftp/hooks/sftp.html
Now what can I try? Is there an alternative to this method, or an alternative to this operator class?
Does anyone know if there is a solution for this?
Thanks in advance.
Please ignore typos or indentation errors caused by Stack Overflow; in my code there is no indentation error.
"providers" packages are only available from Airflow 2.0, which is not yet available in Cloud Composer (as I write this post, the latest available Airflow image is 1.10.14, released this morning).
BUT you can install backport provider packages, which let you use these new packages in earlier 1.10.* versions.
My requirements.txt:
apache-airflow-backport-providers-ssh==2020.10.29
apache-airflow-backport-providers-sftp==2020.10.29
pysftp>=0.2.9
paramiko>=2.6.0
sshtunnel<0.2,>=0.1.4
You can import PyPi packages directly in your Composer environment from the console.
With these dependencies, I could use the newest airflow.providers.ssh.operators.ssh.SSHOperator (formerly airflow.contrib.operators.ssh_operator.SSHOperator) and the new airflow.providers.google.cloud.transfers.gcs_to_sftp.GCSToSFTPOperator (which had no equivalent in contrib operators).
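For illustration, once the backport packages are installed the new-style import paths work on 1.10.* as well. A minimal sketch (the DAG ID, connection ID and command are placeholders of mine, not values from the posts above):
from airflow import models
from airflow.utils.dates import days_ago
# Import path provided by apache-airflow-backport-providers-ssh
from airflow.providers.ssh.operators.ssh import SSHOperator

with models.DAG("dag_backport_ssh_example", start_date=days_ago(1), schedule_interval=None) as dag:
    run_remote_command = SSHOperator(
        task_id="t_ssh_example",
        ssh_conn_id="my_ssh_conn",  # placeholder connection ID
        command="echo 'hello from the remote host'",
    )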
Enjoy!
To use SFTPToGCSOperator in Google Cloud Composer on Airflow 1.10.6, we need to create a plugin and somehow "hack" Airflow by copying the operator/hook code into one file, so that SFTPToGCSOperator can use code from the Airflow 1.10.10 version.
The latest Airflow version has a new airflow.providers directory, which does not exist in earlier versions. This is why you saw the following error: No module named airflow.providers. All the changes I made are described below.
I prepared a working plugin, which you can download here. Before using it, we have to install the following PyPI libraries in the Cloud Composer environment: pysftp, paramiko, sshtunnel.
I copied the full SFTPToGCSOperator code, which starts at line 792. You can see that this operator uses GCSHook:
from airflow.providers.google.cloud.hooks.gcs import GCSHook
which also needs to be copied into the plugin (it starts at line 193).
Then, GCSHook inherits from the GoogleBaseHook class, which we can change to GoogleCloudBaseHook, which is accessible in Airflow 1.10.6, and import it:
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
Finally, the SFTPHook code needs to be imported into the plugin (it starts at line 39). It inherits from the SSHHook class; we can use the one from Airflow 1.10.6 by changing the import statement:
from airflow.contrib.hooks.ssh_hook import SSHHook
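Putting the pieces together, the plugin file ends up with roughly this layout (a sketch only: the file name is hypothetical, the class bodies are the code copied from Airflow 1.10.10 as described above, and the plugin registration itself follows below):
# plugins/sftp_to_gcs_plugin.py  (illustrative file name)
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook  # replaces GoogleBaseHook
from airflow.contrib.hooks.ssh_hook import SSHHook                       # replaces the providers SSHHook
from airflow.models import BaseOperator
from airflow.plugins_manager import AirflowPlugin

class GCSHook(GoogleCloudBaseHook):
    """Copied from airflow.providers.google.cloud.hooks.gcs (Airflow 1.10.10)."""

class SFTPHook(SSHHook):
    """Copied from airflow.providers.sftp.hooks.sftp (Airflow 1.10.10)."""

class SFTPToGCSOperator(BaseOperator):
    """Copied SFTPToGCSOperator code from Airflow 1.10.10, using the two hooks above."""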
At the end of the file, you can find the definition of the plugin:
class SFTPToGCSOperatorPlugin(AirflowPlugin):
    name = "SFTPToGCSOperatorPlugin"
    operators = [SFTPToGCSOperator]
Plugin creation is needed, as this built-in operator is not available in Airflow 1.10.6 (the latest version in Cloud Composer). You can keep an eye on the Cloud Composer version list to see when the newest version of Airflow becomes available to use.
I hope you find the above pieces of information useful.
I have created an automation framework using toolium with Appium which works for both iOS and Android. Toolium is a Python wrapper that I've used to facilitate page object modelling. Basically, the UI is separated from the test case so that the same test case can be used on Android as well as iOS.
I now need to get the framework working with iOS 10 (with the XCUITest framework). So I have changed the elements for iOS to support XCUITest (in places where XPath is used and there is no other means of element identification). There is no change in the folder structure or execution mechanism whatsoever. But with the new framework I get an import error from toolium.
The code from toolium's mobile page object module looks something like this:
# -*- coding: utf-8 -*-
import importlib

from toolium.driver_wrapper import DriverWrappersPool
from toolium.pageobjects.page_object import PageObject


class MobilePageObject(PageObject):
    def __new__(cls, driver_wrapper=None):
        """Instantiate android or ios page object from base page object depending on driver configuration

        Base, Android and iOS page objects must be defined with following structure:
            FOLDER/base/MODULE_NAME.py
                class BasePAGE_OBJECT_NAME(MobilePageObject)
            FOLDER/android/MODULE_NAME.py
                class AndroidPAGE_OBJECT_NAME(BasePAGE_OBJECT_NAME)
            FOLDER/ios/MODULE_NAME.py
                class IosPAGE_OBJECT_NAME(BasePAGE_OBJECT_NAME)

        :param driver_wrapper: driver wrapper instance
        :returns: android or ios page object instance
        """
        if cls.__name__.startswith('Base'):
            __driver_wrapper = driver_wrapper if driver_wrapper else DriverWrappersPool.get_default_wrapper()
            __os_name = 'ios' if __driver_wrapper.is_ios_test() else 'android'
            __class_name = cls.__name__.replace('Base', __os_name.capitalize())
            try:
                return getattr(importlib.import_module(cls.__module__), __class_name)(__driver_wrapper)
            except AttributeError:
                __module_name = cls.__module__.replace('.base.', '.{}.'.format(__os_name))
                print __module_name
                print __class_name
                print __driver_wrapper
                return getattr(importlib.import_module(__module_name), __class_name)(__driver_wrapper)
        else:
            return super(MobilePageObject, cls).__new__(cls)
I follow the folder structure as mentioned in toolium. Basically, I have a pageobjects folder, under which I have base, ios and android folders. All my methods are in the base class. The elements are picked up from either the ios folder or the android folder at run time, based on the driver type.
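For reference, the layout that the docstring above expects looks roughly like this, sketched with a hypothetical intro module (the MobilePageObject import path is the usual toolium one, and under Python 2 every folder needs an __init__.py to be importable as a package):
# pageobjects/__init__.py, pageobjects/base/__init__.py,
# pageobjects/ios/__init__.py, pageobjects/android/__init__.py  -> all present (may be empty)

# pageobjects/base/intro.py
from toolium.pageobjects.mobile_page_object import MobilePageObject

class BaseIntroduction(MobilePageObject):
    def skip_intro(self):
        # shared behaviour lives in the base class
        pass

# pageobjects/ios/intro.py
from pageobjects.base.intro import BaseIntroduction

class IosIntroduction(BaseIntroduction):
    # iOS-specific locators go here
    pass

# pageobjects/android/intro.py
from pageobjects.base.intro import BaseIntroduction

class AndroidIntroduction(BaseIntroduction):
    # Android-specific locators go here
    pass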
Below is the error from the import module
name = 'pageobjects.ios.intro', package = None

    def import_module(name, package=None):
        """Import a module.

        The 'package' argument is required when performing a relative import. It
        specifies the package to use as the anchor point from which to resolve the
        relative import to an absolute import.
        """
        if name.startswith('.'):
            if not package:
                raise TypeError("relative imports require the 'package' argument")
            level = 0
            for character in name:
                if character != '.':
                    break
                level += 1
            name = _resolve_name(name[level:], package, level)
        __import__(name)
E   ImportError: No module named ios.intro
When I print the module name and class name, this is what I get:
module name = pageobjects.ios.intro
class name = IosIntroduction
intro is basically one of the modules. I access it something like this:
from pageobjects.base.intro import BaseIntroduction
On the same machine I have the old framework working without any problem. I have checked environment variables, permissions, etc., but I can't seem to figure out why the import is failing.
PS: I am running this on macOS and also use a virtualenv for Python.
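One quick way to narrow it down outside the test run (a diagnostic sketch, run from the project root inside the same virtualenv) is to attempt the exact import that toolium performs:
import importlib
import sys

# Show the search path the interpreter is actually using.
for path in sys.path:
    print path

# The import that fails inside MobilePageObject.__new__.
module = importlib.import_module('pageobjects.ios.intro')
print getattr(module, 'IosIntroduction')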
Just starting in on my Python learning curve, and hitting a snag in porting some code up to Python 2.7. It appears that in Python 2.7 it is no longer possible to perform a deepcopy() on instances of ConfigParser. It also appears that the Python team isn't terribly interested in restoring such a capability:
http://bugs.python.org/issue16058
Can someone propose an elegant solution for manually constructing a deepcopy/duplicate of an instance of ConfigParser?
Many thanks, -Pete
This is just an example implementation of Jan Vlcinsky's answer, written in Python 3 (I don't have enough reputation to post this as a comment to Jan's answer). Many thanks to Jan for the push in the right direction.
To make a full (deep) copy of base_config into new_config, just do the following:
import io
import configparser
config_string = io.StringIO()
base_config.write(config_string)
# We must reset the buffer ready for reading.
config_string.seek(0)
new_config = configparser.ConfigParser()
new_config.read_file(config_string)
Based on @Toenex's answer, modified for Python 2.7:
import StringIO
import ConfigParser
# Create a deep copy of the configuration object
config_string = StringIO.StringIO()
base_config.write(config_string)
# We must reset the buffer to make it ready for reading.
config_string.seek(0)
new_config = ConfigParser.ConfigParser()
new_config.readfp(config_string)
The previous solution doesn't work in all Python 3 use cases. Specifically, if the original parser is using ExtendedInterpolation, the copy may fail to work correctly. Fortunately, the easy solution is to use the pickle module:
import pickle
import configparser

def deep_copy(config: configparser.ConfigParser) -> configparser.ConfigParser:
    """Deep copy a ConfigParser via a pickle round-trip."""
    rep = pickle.dumps(config)
    new_config = pickle.loads(rep)
    return new_config
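A quick usage sketch (assuming, as above, that the parser instance pickles cleanly):
src = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
src["paths"] = {"root": "/data", "logs": "${root}/logs"}

copy = deep_copy(src)
copy["paths"]["root"] = "/tmp"   # changing the copy...
print(src["paths"]["logs"])      # ...leaves the original untouched: /data/logs
print(copy["paths"]["logs"])     # /tmp/logs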
If you need a new independent copy of a ConfigParser, one option is:
take the original ConfigParser instance
serialize the config into a temporary file or StringIO buffer
use that tmpfile or StringIO buffer to create a new ConfigParser.
And you have it done.
If you are using Python 3 (3.2+) you can use the Mapping Protocol Access to copy (actually deep copy) the sections and options of a source configuration to another ConfigParser object.
You can use read_dict() to copy the state of a configuration parser.
Here is a demo:
import configparser
# the configuration to deep copy:
src_cfg = configparser.ConfigParser()
src_cfg.add_section("Section A")
src_cfg["Section A"]["key1"] = "value1"
src_cfg["Section A"]["key2"] = "value2"
# the destination configuration
dst_cfg = configparser.ConfigParser()
dst_cfg.read_dict(src_cfg)
dst_cfg.add_section("Section B")
dst_cfg["Section B"]["key3"] = "value3"
To display the resulting configuration, you can try:
import io
output = io.StringIO()
dst_cfg.write(output)
print(output.getvalue())
You get:
[Section A]
key1 = value1
key2 = value2
[Section B]
key3 = value3
After reading this article, I am more familiar with config.ini.
I recorded it as follows:
import io
import configparser

def copy_config_demo():
    with io.StringIO() as memory_file:
        memory_file.write(str(test_config_data.__doc__))  # original_config.write(memory_file)
        memory_file.seek(0)
        new_config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
        new_config.read_file(memory_file)

    # below is just for test
    for section_name, list_item in [(section_name, new_config.items(section_name)) for section_name in new_config.sections()]:
        print('\n[' + section_name + ']')
        for key, value in list_item:
            print(f'{key}: {value}')

def test_config_data():
    """
    [Common]
    home_dir: /Users
    library_dir: /Library
    system_dir: /System
    macports_dir: /opt/local

    [Frameworks]
    Python: >=3.2
    path: ${Common:system_dir}/Library/Frameworks/

    [Arthur]
    name: Carson
    my_dir: ${Common:home_dir}/twosheds
    my_pictures: ${my_dir}/Pictures
    python_dir: ${Frameworks:path}/Python/Versions/${Frameworks:Python}
    """

copy_config_demo()  # produces the output shown below
output:
[Common]
home_dir: /Users
library_dir: /Library
system_dir: /System
macports_dir: /opt/local
[Frameworks]
python: >=3.2
path: /System/Library/Frameworks/
[Arthur]
name: Carson
my_dir: /Users/twosheds
my_pictures: /Users/twosheds/Pictures
python_dir: /System/Library/Frameworks//Python/Versions/>=3.2
I hope it is helpful to you.
Okay simple question (I think).
I have a DateTime field (auto_now_add) and when output to a template
{{ edited|date:"DATETIME_FORMAT" }}
I get the expected result of "Sept. 16, 2012, 12:01 p.m."
But unfortunately things are slightly more complicated, since I am using Backbone.js and need to pass the datetime via JSON, and since it is only used for display purposes I decided to pass it as a nicely locale-formatted string. So I dug into the code, found what the template tag uses, and this is what I set up:
from django.utils.formats import date_format
return {
'created': date_format(self.created, 'DATETIME_FORMAT'),
}
But that ends up with this "Sept. 16, 2012, 5:01 p.m."
I have a feeling it has to do with the following decorator on the template tag:
@register.filter(expects_localtime=True, is_safe=False)
I also tried this, but ended up with the same results:
from django.utils import timezone
tz = timezone.get_current_timezone()
logger.info(tz)
logger.info(self.edited)
logger.info(format(self.edited, 'DATETIME_FORMAT'))
logger.info(self.edited.replace(tzinfo=tz))
logger.info(format(self.edited.replace(tzinfo=tz), 'DATETIME_FORMAT'))
Which gave me this
INFO: America/Chicago
INFO: 2012-09-16 17:01:52.921276+00:00
INFO: Sept. 16, 2012, 5:01 p.m.
INFO: 2012-09-16 17:01:52.921276-06:00
INFO: Sept. 16, 2012, 5:01 p.m.
So yeah, I must be missing something. I have been up and down the Django documentation and cannot find anything that points me to what I am doing wrong. Thanks for any help.
I figured it out. And sadly, it was in the Django time zone documentation that I thought I had exhausted: use timezone.localtime().
from django.utils.formats import date_format
from django.utils import timezone
date_format(timezone.localtime(page.created), 'DATETIME_FORMAT')
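Tying this back to the JSON snippet in the question, the dictionary entry becomes something like the following (sketched as a small helper; USE_TZ and the current time zone are assumed to be configured as in the question):
from django.utils.formats import date_format
from django.utils import timezone

def created_display(obj):
    # obj is a model instance with a `created` DateTimeField
    return date_format(timezone.localtime(obj.created), 'DATETIME_FORMAT')

payload = {'created': created_display(page)}  # e.g. "Sept. 16, 2012, 12:01 p.m."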
Maybe the following will help you.
>>> obj = MyModel.objects.get(...)
>>> data = {"date_format": obj.edited}
>>> from django.core.serializers.json import DjangoJSONEncoder
>>> data = json.dumps(data, cls=DjangoJSONEncoder)
>>> data
'{"date_format": "2012-09-16T21:45:46Z"}'
Send the json formatted data from your view:
E.g. return HttpResponse(data, mimetype='application/json').
And then at your client side code you can convert the date_format to the local timezone with:
(Assuming response is the JSON parsed object)
var d = new Date(Date.parse(response.date_format));
// Sun Sep 16 2012 22:45:46 GMT+0100 (BST)
I'm following the project structure as laid out by Zachary Voase, but I'm struggling with one specific issue.
I'd very much like to have a custom settings boolean variable (let's call it SEND_LIVE_MAIL) that I would be using in the project. Basically, I'd like to use this settings variable in my code and if SEND_LIVE_MAIL is True actually send out a mail, whereas when it is set to False just print its contents out to the console. The latter would apply to the dev environment and when running unittests.
What would be a good way of implementing this? Currently, depending on the environment, the Django server uses dev, staging or prd settings, but for custom settings variables I believe these need to be imported 'literally'. In other words, I'd be using something like the following in my views:
from settings.development import SEND_LIVE_MAIL
which of course isn't what I want. I'd like to be able to do something like:
from settings import SEND_LIVE_MAIL
and depending on the environment, the correct value is assigned to the SEND_LIVE_MAIL variable.
Thanks in advance!
You shouldn't be importing directly from your settings files anyway. Use:
>>> from django.conf import settings
>>> settings.SEND_LIVE_MAIL
True
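A sketch of how the flag might then be used in application code (the helper function and recipient address below are made up for illustration):
from django.conf import settings
from django.core.mail import send_mail

def notify(subject, body, recipient="user@example.com"):
    if settings.SEND_LIVE_MAIL:
        send_mail(subject, body, None, [recipient])
    else:
        # Dev / unit-test environments: just show what would have been sent.
        print("Would send mail to %s: %s\n%s" % (recipient, subject, body))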
The simplest solution is to have this at the bottom of your settings file:
try:
    from local_settings import *
except ImportError:
    pass
And in local_settings.py specify all your environment-specific overrides. I generally don't commit this file to version control.
There are more advanced ways of doing it, where you end up with a default settings file and a per-environment override.
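One common shape of that default-plus-override layout (a rough sketch; the module split and values are illustrative):
# settings/base.py
SEND_LIVE_MAIL = False           # safe default for dev and unit tests

# settings/production.py
from settings.base import *      # start from the defaults
SEND_LIVE_MAIL = True            # override for the live site

# run with: DJANGO_SETTINGS_MODULE=settings.production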
This article by David Cramer covers the various approaches, including both of the ones I've mentioned: http://justcramer.com/2011/01/13/settings-in-django/
import os

PROJECT_PATH = os.path.dirname(__file__)

try:
    execfile(os.path.join(PROJECT_PATH, 'local_settings.py'))
except IOError:
    pass
Then you can have your local_settings.py behave as if it was pasted directly into your settings.py:
$ cat local_settings.py
INSTALLED_APPS += ['foo']
You can do something like this for a wide variety of environment-based settings, but here's an example for just SEND_LIVE_MAIL.
settings_config.py
import re
import socket

class Config:
    def __init__(self):
        fqdn = socket.getfqdn()
        env = re.search(r'(devhost|stagehost|prodhost)', fqdn)
        env = env and env.group(1)
        env = env or 'devhost'
        if env == 'devhost':
            self.SEND_LIVE_MAIL = # whatever
        elif env == 'stagehost':
            self.SEND_LIVE_MAIL = # whatever
        elif env == 'prodhost':
            self.SEND_LIVE_MAIL = # whatever

config = Config()
settings.py
from settings_config import config
SEND_LIVE_MAIL = config.SEND_LIVE_MAIL