Logstash conf error - amazon_es - amazon-web-services

I am trying to configure my logstash.conf file for the first time, with an output to amazon_es.
Here is my whole logstash.conf file:
input {
  jdbc {
    jdbc_connection_string => "jdbc:mysql://localhost:3306/testdb"
    # The user we wish to execute our statement as
    jdbc_user => "root"
    jdbc_password => "root"
    # The path to our downloaded jdbc driver
    jdbc_driver_library => "/mnt/c/Users/xxxxxxxx/mysql-connector-java-5.1.45/mysql-connector-java-5.1.45-bin.jar"
    jdbc_driver_class => "com.mysql.jdbc.Driver"
    # our query
    statement => "SELECT * FROM testtable"
  }
}
output {
  amazon_es {
    hosts => ["search-xxxxx.eu-west-3.es.amazonaws.com"]
    region => "eu-west-3"
    aws_access_key_id => 'xxxxxxxxxxxxxxxxxxxxxx'
    aws_secret_access_key => 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
    index => "test-migrate"
    document_type => "data"
  }
}
I have 3 rows selected from my database, but the first time I run the script, only the first row is indexed in Elasticsearch. The second time I run it, all 3 rows are indexed. I get the error below each time I run Logstash with this conf file.
EDIT 2:
[2018-02-08T14:31:18,270][INFO ][logstash.modules.scaffold] Initializing module {:module_name=>"fb_apache", :directory=>"/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/modules/fb_apache/configuration"}
[2018-02-08T14:31:18,279][DEBUG][logstash.plugins.registry] Adding plugin to the registry {:name=>"fb_apache", :type=>:modules, :class=>#<LogStash::Modules::Scaffold:0x47c515a1 @module_name="fb_apache", @directory="/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/modules/fb_apache/configuration", @kibana_version_parts=["6", "0", "0"]>}
[2018-02-08T14:31:18,286][INFO ][logstash.modules.scaffold] Initializing module {:module_name=>"netflow", :directory=>"/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/modules/netflow/configuration"}
[2018-02-08T14:31:18,287][DEBUG][logstash.plugins.registry] Adding plugin to the registry {:name=>"netflow", :type=>:modules, :class=>#<LogStash::Modules::Scaffold:0x6f1a5910 @module_name="netflow", @directory="/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/modules/netflow/configuration", @kibana_version_parts=["6", "0", "0"]>}
[2018-02-08T14:31:18,765][DEBUG][logstash.runner ] -------- Logstash Settings (* means modified) ---------
[2018-02-08T14:31:18,765][DEBUG][logstash.runner ] node.name: "DEVFE-AMT"
[2018-02-08T14:31:18,766][DEBUG][logstash.runner ] *path.config: "logstash.conf"
[2018-02-08T14:31:18,766][DEBUG][logstash.runner ] path.data: "/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/data"
[2018-02-08T14:31:18,767][DEBUG][logstash.runner ] modules.cli: []
[2018-02-08T14:31:18,768][DEBUG][logstash.runner ] modules: []
[2018-02-08T14:31:18,768][DEBUG][logstash.runner ] modules_setup: false
[2018-02-08T14:31:18,768][DEBUG][logstash.runner ] config.test_and_exit: false
[2018-02-08T14:31:18,769][DEBUG][logstash.runner ] config.reload.automatic: false
[2018-02-08T14:31:18,769][DEBUG][logstash.runner ] config.reload.interval: 3000000000
[2018-02-08T14:31:18,769][DEBUG][logstash.runner ] config.support_escapes: false
[2018-02-08T14:31:18,770][DEBUG][logstash.runner ] metric.collect: true
[2018-02-08T14:31:18,770][DEBUG][logstash.runner ] pipeline.id: "main"
[2018-02-08T14:31:18,771][DEBUG][logstash.runner ] pipeline.system: false
[2018-02-08T14:31:18,771][DEBUG][logstash.runner ] pipeline.workers: 8
[2018-02-08T14:31:18,771][DEBUG][logstash.runner ] pipeline.output.workers: 1
[2018-02-08T14:31:18,772][DEBUG][logstash.runner ] pipeline.batch.size: 125
[2018-02-08T14:31:18,772][DEBUG][logstash.runner ] pipeline.batch.delay: 50
[2018-02-08T14:31:18,772][DEBUG][logstash.runner ] pipeline.unsafe_shutdown: false
[2018-02-08T14:31:18,772][DEBUG][logstash.runner ] pipeline.java_execution: false
[2018-02-08T14:31:18,773][DEBUG][logstash.runner ] pipeline.reloadable: true
[2018-02-08T14:31:18,773][DEBUG][logstash.runner ] path.plugins: []
[2018-02-08T14:31:18,773][DEBUG][logstash.runner ] config.debug: false
[2018-02-08T14:31:18,776][DEBUG][logstash.runner ] *log.level: "debug" (default: "info")
[2018-02-08T14:31:18,783][DEBUG][logstash.runner ] version: false
[2018-02-08T14:31:18,784][DEBUG][logstash.runner ] help: false
[2018-02-08T14:31:18,784][DEBUG][logstash.runner ] log.format: "plain"
[2018-02-08T14:31:18,786][DEBUG][logstash.runner ] http.host: "127.0.0.1"
[2018-02-08T14:31:18,793][DEBUG][logstash.runner ] http.port: 9600..9700
[2018-02-08T14:31:18,793][DEBUG][logstash.runner ] http.environment: "production"
[2018-02-08T14:31:18,794][DEBUG][logstash.runner ] queue.type: "memory"
[2018-02-08T14:31:18,796][DEBUG][logstash.runner ] queue.drain: false
[2018-02-08T14:31:18,804][DEBUG][logstash.runner ] queue.page_capacity: 67108864
[2018-02-08T14:31:18,809][DEBUG][logstash.runner ] queue.max_bytes: 1073741824
[2018-02-08T14:31:18,822][DEBUG][logstash.runner ] queue.max_events: 0
[2018-02-08T14:31:18,823][DEBUG][logstash.runner ] queue.checkpoint.acks: 1024
[2018-02-08T14:31:18,836][DEBUG][logstash.runner ] queue.checkpoint.writes: 1024
[2018-02-08T14:31:18,837][DEBUG][logstash.runner ] queue.checkpoint.interval: 1000
[2018-02-08T14:31:18,846][DEBUG][logstash.runner ] dead_letter_queue.enable: false
[2018-02-08T14:31:18,854][DEBUG][logstash.runner ] dead_letter_queue.max_bytes: 1073741824
[2018-02-08T14:31:18,859][DEBUG][logstash.runner ] slowlog.threshold.warn: -1
[2018-02-08T14:31:18,868][DEBUG][logstash.runner ] slowlog.threshold.info: -1
[2018-02-08T14:31:18,873][DEBUG][logstash.runner ] slowlog.threshold.debug: -1
[2018-02-08T14:31:18,885][DEBUG][logstash.runner ] slowlog.threshold.trace: -1
[2018-02-08T14:31:18,887][DEBUG][logstash.runner ] keystore.classname: "org.logstash.secret.store.backend.JavaKeyStore"
[2018-02-08T14:31:18,896][DEBUG][logstash.runner ] keystore.file: "/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/config/logstash.keystore"
[2018-02-08T14:31:18,896][DEBUG][logstash.runner ] path.queue: "/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/data/queue"
[2018-02-08T14:31:18,911][DEBUG][logstash.runner ] path.dead_letter_queue: "/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/data/dead_letter_queue"
[2018-02-08T14:31:18,911][DEBUG][logstash.runner ] path.settings: "/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/config"
[2018-02-08T14:31:18,926][DEBUG][logstash.runner ] path.logs: "/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/logs"
[2018-02-08T14:31:18,926][DEBUG][logstash.runner ] --------------- Logstash Settings -------------------
[2018-02-08T14:31:18,998][WARN ][logstash.config.source.multilocal] Ignoring the 'pipelines.yml' file because modules or command line options are specified
[2018-02-08T14:31:19,067][DEBUG][logstash.agent ] Setting up metric collection
[2018-02-08T14:31:19,147][DEBUG][logstash.instrument.periodicpoller.os] Starting {:polling_interval=>5, :polling_timeout=>120}
[2018-02-08T14:31:19,293][DEBUG][logstash.instrument.periodicpoller.jvm] Starting {:polling_interval=>5, :polling_timeout=>120}
[2018-02-08T14:31:19,422][DEBUG][logstash.instrument.periodicpoller.jvm] collector name {:name=>"ParNew"}
[2018-02-08T14:31:19,429][DEBUG][logstash.instrument.periodicpoller.jvm] collector name {:name=>"ConcurrentMarkSweep"}
[2018-02-08T14:31:19,453][DEBUG][logstash.instrument.periodicpoller.persistentqueue] Starting {:polling_interval=>5, :polling_timeout=>120}
[2018-02-08T14:31:19,464][DEBUG][logstash.instrument.periodicpoller.deadletterqueue] Starting {:polling_interval=>5, :polling_timeout=>120}
[2018-02-08T14:31:19,519][INFO ][logstash.runner ] Starting Logstash {"logstash.version"=>"6.2.0"}
[2018-02-08T14:31:19,537][DEBUG][logstash.agent ] Starting agent
[2018-02-08T14:31:19,565][DEBUG][logstash.agent ] Starting puma
[2018-02-08T14:31:19,580][DEBUG][logstash.agent ] Trying to start WebServer {:port=>9600}
[2018-02-08T14:31:19,654][DEBUG][logstash.config.source.local.configpathloader] Skipping the following files while reading config since they don't match the specified glob pattern {:files=>["/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/CONTRIBUTORS", "/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/Gemfile", "/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/Gemfile.lock", "/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/LICENSE", "/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/NOTICE.TXT", "/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/bin", "/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/config", "/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/data", "/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/lib", "/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/logs", "/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/logstash-core", "/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/logstash-core-plugin-api", "/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/modules", "/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/tools", "/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/vendor"]}
[2018-02-08T14:31:19,658][DEBUG][logstash.api.service ] [api-service] start
[2018-02-08T14:31:19,662][DEBUG][logstash.config.source.local.configpathloader] Reading config file {:config_file=>"/mnt/c/Users/anthony.maffert/l/logstash-6.2.0/logstash.conf"}
[2018-02-08T14:31:19,770][DEBUG][logstash.agent ] Converging pipelines state {:actions_count=>1}
[2018-02-08T14:31:19,776][DEBUG][logstash.agent ] Executing action {:action=>LogStash::PipelineAction::Create/pipeline_id:main}
[2018-02-08T14:31:19,948][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
[2018-02-08T14:31:21,157][DEBUG][logstash.plugins.registry] On demand adding plugin to the registry {:name=>"jdbc", :type=>"input", :class=>LogStash::Inputs::Jdbc}
[2018-02-08T14:31:21,557][DEBUG][logstash.plugins.registry] On demand adding plugin to the registry {:name=>"plain", :type=>"codec", :class=>LogStash::Codecs::Plain}
[2018-02-08T14:31:21,580][DEBUG][logstash.codecs.plain ] config LogStash::Codecs::Plain/@id = "plain_32fc0754-0187-437b-9d4d-2611eaba9a45"
[2018-02-08T14:31:21,581][DEBUG][logstash.codecs.plain ] config LogStash::Codecs::Plain/@enable_metric = true
[2018-02-08T14:31:21,581][DEBUG][logstash.codecs.plain ] config LogStash::Codecs::Plain/@charset = "UTF-8"
[2018-02-08T14:31:21,612][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@jdbc_connection_string = "jdbc:mysql://localhost:3306/testdb"
[2018-02-08T14:31:21,613][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@jdbc_user = "root"
[2018-02-08T14:31:21,616][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@jdbc_password = <password>
[2018-02-08T14:31:21,623][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@jdbc_driver_library = "/mnt/c/Users/anthony.maffert/Desktop/DocumentsUbuntu/mysql-connector-java-5.1.45/mysql-connector-java-5.1.45-bin.jar"
[2018-02-08T14:31:21,624][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@jdbc_driver_class = "com.mysql.jdbc.Driver"
[2018-02-08T14:31:21,631][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@statement = "SELECT * FROM testtable"
[2018-02-08T14:31:21,633][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@id = "ff7529f734e0813846bc8e3b2bcf0794d99ff5cb61b947e0497922b083b3851a"
[2018-02-08T14:31:21,647][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@enable_metric = true
[2018-02-08T14:31:21,659][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@codec = <LogStash::Codecs::Plain id=>"plain_32fc0754-0187-437b-9d4d-2611eaba9a45", enable_metric=>true, charset=>"UTF-8">
[2018-02-08T14:31:21,663][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@add_field = {}
[2018-02-08T14:31:21,663][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@jdbc_paging_enabled = false
[2018-02-08T14:31:21,678][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@jdbc_page_size = 100000
[2018-02-08T14:31:21,679][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@jdbc_validate_connection = false
[2018-02-08T14:31:21,693][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@jdbc_validation_timeout = 3600
[2018-02-08T14:31:21,694][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@jdbc_pool_timeout = 5
[2018-02-08T14:31:21,708][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@sequel_opts = {}
[2018-02-08T14:31:21,708][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@sql_log_level = "info"
[2018-02-08T14:31:21,715][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@connection_retry_attempts = 1
[2018-02-08T14:31:21,716][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@connection_retry_attempts_wait_time = 0.5
[2018-02-08T14:31:21,721][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@parameters = {}
[2018-02-08T14:31:21,723][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@last_run_metadata_path = "/home/maffer_a/.logstash_jdbc_last_run"
[2018-02-08T14:31:21,731][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@use_column_value = false
[2018-02-08T14:31:21,731][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@tracking_column_type = "numeric"
[2018-02-08T14:31:21,745][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@clean_run = false
[2018-02-08T14:31:21,746][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@record_last_run = true
[2018-02-08T14:31:21,808][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@lowercase_column_names = true
[2018-02-08T14:31:21,808][DEBUG][logstash.inputs.jdbc ] config LogStash::Inputs::Jdbc/@columns_charset = {}
[2018-02-08T14:31:21,830][DEBUG][logstash.plugins.registry] On demand adding plugin to the registry {:name=>"stdout", :type=>"output", :class=>LogStash::Outputs::Stdout}
[2018-02-08T14:31:21,893][DEBUG][logstash.plugins.registry] On demand adding plugin to the registry {:name=>"json_lines", :type=>"codec", :class=>LogStash::Codecs::JSONLines}
[2018-02-08T14:31:21,901][DEBUG][logstash.codecs.jsonlines] config LogStash::Codecs::JSONLines/@id = "json_lines_e27ae5ff-5352-4061-9415-c75234fafc91"
[2018-02-08T14:31:21,902][DEBUG][logstash.codecs.jsonlines] config LogStash::Codecs::JSONLines/@enable_metric = true
[2018-02-08T14:31:21,902][DEBUG][logstash.codecs.jsonlines] config LogStash::Codecs::JSONLines/@charset = "UTF-8"
[2018-02-08T14:31:21,905][DEBUG][logstash.codecs.jsonlines] config LogStash::Codecs::JSONLines/@delimiter = "\n"
[2018-02-08T14:31:21,915][DEBUG][logstash.outputs.stdout ] config LogStash::Outputs::Stdout/@codec = <LogStash::Codecs::JSONLines id=>"json_lines_e27ae5ff-5352-4061-9415-c75234fafc91", enable_metric=>true, charset=>"UTF-8", delimiter=>"\n">
[2018-02-08T14:31:21,924][DEBUG][logstash.outputs.stdout ] config LogStash::Outputs::Stdout/@id = "4fb47c5631fa87c6a839a6f476077e9fa55456c479eee7251568f325435f3bbc"
[2018-02-08T14:31:21,929][DEBUG][logstash.outputs.stdout ] config LogStash::Outputs::Stdout/@enable_metric = true
[2018-02-08T14:31:21,939][DEBUG][logstash.outputs.stdout ] config LogStash::Outputs::Stdout/@workers = 1
[2018-02-08T14:31:23,217][DEBUG][logstash.plugins.registry] On demand adding plugin to the registry {:name=>"amazon_es", :type=>"output", :class=>LogStash::Outputs::AmazonES}
[2018-02-08T14:31:23,287][DEBUG][logstash.codecs.plain ] config LogStash::Codecs::Plain/@id = "plain_673a059d-4236-4f10-ba64-43ee33e050e4"
[2018-02-08T14:31:23,288][DEBUG][logstash.codecs.plain ] config LogStash::Codecs::Plain/@enable_metric = true
[2018-02-08T14:31:23,288][DEBUG][logstash.codecs.plain ] config LogStash::Codecs::Plain/@charset = "UTF-8"
[2018-02-08T14:31:23,294][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@hosts = ["search-XXXXXXXXXXXXXX.eu-west-3.es.amazonaws.com"]
[2018-02-08T14:31:23,294][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@region = "eu-west-3"
[2018-02-08T14:31:23,295][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@aws_access_key_id = "XXXXXXXXXXX"
[2018-02-08T14:31:23,295][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@aws_secret_access_key = "XXXXXXXXXXXXX"
[2018-02-08T14:31:23,296][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@index = "test-migrate"
[2018-02-08T14:31:23,299][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@document_type = "data"
[2018-02-08T14:31:23,299][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@id = "7c6401c2f72c63f8d359a42a2f440a663303cb2cbfefff8fa32d64a6f571a527"
[2018-02-08T14:31:23,306][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@enable_metric = true
[2018-02-08T14:31:23,310][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@codec = <LogStash::Codecs::Plain id=>"plain_673a059d-4236-4f10-ba64-43ee33e050e4", enable_metric=>true, charset=>"UTF-8">
[2018-02-08T14:31:23,310][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@workers = 1
[2018-02-08T14:31:23,310][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@manage_template = true
[2018-02-08T14:31:23,317][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@template_name = "logstash"
[2018-02-08T14:31:23,325][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@template_overwrite = false
[2018-02-08T14:31:23,326][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@port = 443
[2018-02-08T14:31:23,332][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@protocol = "https"
[2018-02-08T14:31:23,333][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@flush_size = 500
[2018-02-08T14:31:23,335][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@idle_flush_time = 1
[2018-02-08T14:31:23,340][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@action = "index"
[2018-02-08T14:31:23,341][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@path = "/"
[2018-02-08T14:31:23,341][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@max_retries = 3
[2018-02-08T14:31:23,341][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@retry_max_items = 5000
[2018-02-08T14:31:23,342][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@retry_max_interval = 5
[2018-02-08T14:31:23,342][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@doc_as_upsert = false
[2018-02-08T14:31:23,342][DEBUG][logstash.outputs.amazones] config LogStash::Outputs::AmazonES/@upsert = ""
[2018-02-08T14:31:23,426][INFO ][logstash.pipeline ] Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>8, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50}
[2018-02-08T14:31:23,476][DEBUG][logstash.outputs.amazones] Normalizing http path {:path=>"/", :normalized=>"/"}
[2018-02-08T14:31:23,791][INFO ][logstash.outputs.amazones] Automatic template management enabled {:manage_template=>"true"}
[2018-02-08T14:31:23,835][INFO ][logstash.outputs.amazones] Using mapping template {:template=>{"template"=>"logstash-*", "settings"=>{"index.refresh_interval"=>"5s"}, "mappings"=>{"_default_"=>{"_all"=>{"enabled"=>true, "omit_norms"=>true}, "dynamic_templates"=>[{"message_field"=>{"match"=>"message", "match_mapping_type"=>"string", "mapping"=>{"type"=>"string", "index"=>"analyzed", "omit_norms"=>true}}}, {"string_fields"=>{"match"=>"*", "match_mapping_type"=>"string", "mapping"=>{"type"=>"string", "index"=>"analyzed", "omit_norms"=>true, "fields"=>{"raw"=>{"type"=>"string", "index"=>"not_analyzed", "ignore_above"=>256}}}}}], "properties"=>{"@version"=>{"type"=>"string", "index"=>"not_analyzed"}, "geoip"=>{"type"=>"object", "dynamic"=>true, "properties"=>{"location"=>{"type"=>"geo_point"}}}}}}}}
[2018-02-08T14:31:24,480][DEBUG][logstash.instrument.periodicpoller.jvm] collector name {:name=>"ParNew"}
[2018-02-08T14:31:24,482][DEBUG][logstash.instrument.periodicpoller.jvm] collector name {:name=>"ConcurrentMarkSweep"}
[2018-02-08T14:31:25,242][ERROR][logstash.outputs.amazones] Failed to install template: [400] {"error":{"root_cause":[{"type":"mapper_parsing_exception","reason":"No handler for type [string] declared on field [@version]"}],"type":"mapper_parsing_exception","reason":"Failed to parse mapping [_default_]: No handler for type [string] declared on field [@version]","caused_by":{"type":"mapper_parsing_exception","reason":"No handler for type [string] declared on field [@version]"}},"status":400}
[2018-02-08T14:31:25,246][INFO ][logstash.outputs.amazones] New Elasticsearch output {:hosts=>["search-XXXXXXXXXXXX.eu-west-3.es.amazonaws.com"], :port=>443}
[2018-02-08T14:31:25,619][INFO ][logstash.pipeline ] Pipeline started succesfully {:pipeline_id=>"main", :thread=>"#<Thread:0x42da9cf8 run>"}
[2018-02-08T14:31:25,712][INFO ][logstash.agent ] Pipelines running {:count=>1, :pipelines=>["main"]}
Thu Feb 08 14:31:26 GMT 2018 WARN: Establishing SSL connection without server's identity verification is not recommended. According to MySQL 5.5.45+, 5.6.26+ and 5.7.6+ requirements SSL connection must be established by default if explicit option isn't set. For compliance with existing applications not using SSL the verifyServerCertificate property is set to 'false'. You need either to explicitly disable SSL by setting useSSL=false, or set useSSL=true and provide truststore for server certificate verification.
[2018-02-08T14:31:26,716][INFO ][logstash.inputs.jdbc ] (0.008417s) SELECT version()
[2018-02-08T14:31:26,858][INFO ][logstash.inputs.jdbc ] (0.002332s) SELECT count(*) AS `count` FROM (SELECT * FROM testtable) AS `t1` LIMIT 1
[2018-02-08T14:31:26,863][DEBUG][logstash.inputs.jdbc ] Executing JDBC query {:statement=>"SELECT * FROM testtable", :parameters=>{:sql_last_value=>2018-02-08 14:23:01 UTC}, :count=>3}
[2018-02-08T14:31:26,873][INFO ][logstash.inputs.jdbc ] (0.000842s) SELECT * FROM testtable
[2018-02-08T14:31:27,022][DEBUG][logstash.inputs.jdbc ] Closing {:plugin=>"LogStash::Inputs::Jdbc"}
[2018-02-08T14:31:27,023][DEBUG][logstash.pipeline ] filter received {"event"=>{"@timestamp"=>2018-02-08T14:31:26.918Z, "personid"=>4004, "city"=>"Cape Town", "@version"=>"1", "firstname"=>"Richard", "lastname"=>"Baron"}}
[2018-02-08T14:31:27,023][DEBUG][logstash.pipeline ] filter received {"event"=>{"@timestamp"=>2018-02-08T14:31:26.919Z, "personid"=>4003, "city"=>"Cape Town", "@version"=>"1", "firstname"=>"Sharon", "lastname"=>"McWell"}}
[2018-02-08T14:31:27,023][DEBUG][logstash.pipeline ] filter received {"event"=>{"@timestamp"=>2018-02-08T14:31:26.890Z, "personid"=>4005, "city"=>"Cape Town", "@version"=>"1", "firstname"=>"Jaques", "lastname"=>"Kallis"}}
[2018-02-08T14:31:27,032][DEBUG][logstash.pipeline ] output received {"event"=>{"@timestamp"=>2018-02-08T14:31:26.918Z, "personid"=>4004, "city"=>"Cape Town", "@version"=>"1", "firstname"=>"Richard", "lastname"=>"Baron"}}
[2018-02-08T14:31:27,035][DEBUG][logstash.pipeline ] output received {"event"=>{"@timestamp"=>2018-02-08T14:31:26.890Z, "personid"=>4005, "city"=>"Cape Town", "@version"=>"1", "firstname"=>"Jaques", "lastname"=>"Kallis"}}
[2018-02-08T14:31:27,040][DEBUG][logstash.pipeline ] output received {"event"=>{"@timestamp"=>2018-02-08T14:31:26.919Z, "personid"=>4003, "city"=>"Cape Town", "@version"=>"1", "firstname"=>"Sharon", "lastname"=>"McWell"}}
[2018-02-08T14:31:27,047][DEBUG][logstash.pipeline ] Pushing flush onto pipeline {:pipeline_id=>"main", :thread=>"#<Thread:0x42da9cf8 sleep>"}
[2018-02-08T14:31:27,053][DEBUG][logstash.pipeline ] Shutting down filter/output workers {:pipeline_id=>"main", :thread=>"#<Thread:0x42da9cf8 run>"}
[2018-02-08T14:31:27,062][DEBUG][logstash.pipeline ] Pushing shutdown {:pipeline_id=>"main", :thread=>"#<Thread:0x3f1899bb@[main]>worker0 run>"}
[2018-02-08T14:31:27,069][DEBUG][logstash.pipeline ] Pushing shutdown {:pipeline_id=>"main", :thread=>"#<Thread:0x41529ca4@[main]>worker1 run>"}
[2018-02-08T14:31:27,070][DEBUG][logstash.pipeline ] Pushing shutdown {:pipeline_id=>"main", :thread=>"#<Thread:0x1c56e6d6@[main]>worker2 run>"}
[2018-02-08T14:31:27,083][DEBUG][logstash.pipeline ] Pushing shutdown {:pipeline_id=>"main", :thread=>"#<Thread:0x2f767b45@[main]>worker3 sleep>"}
[2018-02-08T14:31:27,083][DEBUG][logstash.pipeline ] Pushing shutdown {:pipeline_id=>"main", :thread=>"#<Thread:0x2017b165@[main]>worker4 run>"}
[2018-02-08T14:31:27,098][DEBUG][logstash.pipeline ] Pushing shutdown {:pipeline_id=>"main", :thread=>"#<Thread:0x65923ecd@[main]>worker5 sleep>"}
[2018-02-08T14:31:27,099][DEBUG][logstash.pipeline ] Pushing shutdown {:pipeline_id=>"main", :thread=>"#<Thread:0x1714b839@[main]>worker6 run>"}
[2018-02-08T14:31:27,113][DEBUG][logstash.pipeline ] Pushing shutdown {:pipeline_id=>"main", :thread=>"#<Thread:0xcbee48c@[main]>worker7 run>"}
[2018-02-08T14:31:27,116][DEBUG][logstash.pipeline ] Shutdown waiting for worker thread {:pipeline_id=>"main", :thread=>"#<Thread:0x3f1899bb@[main]>worker0 run>"}
{"@timestamp":"2018-02-08T14:31:26.919Z","personid":4003,"city":"Cape Town","@version":"1","firstname":"Sharon","lastname":"McWell"}
{"@timestamp":"2018-02-08T14:31:26.918Z","personid":4004,"city":"Cape Town","@version":"1","firstname":"Richard","lastname":"Baron"}
{"@timestamp":"2018-02-08T14:31:26.890Z","personid":4005,"city":"Cape Town","@version":"1","firstname":"Jaques","lastname":"Kallis"}
[2018-02-08T14:31:27,153][DEBUG][logstash.pipeline ] Shutdown waiting for worker thread {:pipeline_id=>"main", :thread=>"#<Thread:0x41529ca4@[main]>worker1 run>"}
[2018-02-08T14:31:27,158][DEBUG][logstash.pipeline ] Shutdown waiting for worker thread {:pipeline_id=>"main", :thread=>"#<Thread:0x1c56e6d6@[main]>worker2 run>"}
[2018-02-08T14:31:27,200][DEBUG][logstash.outputs.amazones] Flushing output {:outgoing_count=>1, :time_since_last_flush=>1.927723, :outgoing_events=>{nil=>[["index", {:_id=>nil, :_index=>"test-migrate", :_type=>"data", :_routing=>nil}, #<LogStash::Event:0x1bacf548>]]}, :batch_timeout=>1, :force=>nil, :final=>nil}
[2018-02-08T14:31:27,207][DEBUG][logstash.pipeline ] Shutdown waiting for worker thread {:pipeline_id=>"main", :thread=>"#<Thread:0x2f767b45@[main]>worker3 sleep>"}
[2018-02-08T14:31:27,251][DEBUG][logstash.instrument.periodicpoller.os] Stopping
[2018-02-08T14:31:27,271][DEBUG][logstash.instrument.periodicpoller.jvm] Stopping
[2018-02-08T14:31:27,273][DEBUG][logstash.instrument.periodicpoller.persistentqueue] Stopping
[2018-02-08T14:31:27,281][DEBUG][logstash.instrument.periodicpoller.deadletterqueue] Stopping
[2018-02-08T14:31:27,356][DEBUG][logstash.agent ] Shutting down all pipelines {:pipelines_count=>1}
[2018-02-08T14:31:27,362][DEBUG][logstash.agent ] Converging pipelines state {:actions_count=>1}
[2018-02-08T14:31:27,363][DEBUG][logstash.agent ] Executing action {:action=>LogStash::PipelineAction::Stop/pipeline_id:main}
[2018-02-08T14:31:27,385][DEBUG][logstash.pipeline ] Stopping inputs {:pipeline_id=>"main", :thread=>"#<Thread:0x42da9cf8 sleep>"}
[2018-02-08T14:31:27,389][DEBUG][logstash.inputs.jdbc ] Stopping {:plugin=>"LogStash::Inputs::Jdbc"}
[2018-02-08T14:31:27,399][DEBUG][logstash.pipeline ] Stopped inputs {:pipeline_id=>"main", :thread=>"#<Thread:0x42da9cf8 sleep>"}

You should try to add the index template yourself. Copy this ES 6.x template to your local file system, then add the template setting to your amazon_es output; it should work:
amazon_es {
  hosts => ["search-xxxxx.eu-west-3.es.amazonaws.com"]
  region => "eu-west-3"
  aws_access_key_id => 'xxxxxxxxxxxxxxxxxxxxxx'
  aws_secret_access_key => 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
  index => "test-migrate"
  document_type => "data"
  template => '/path/to/template.json'
}
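The root cause is visible in the log above: the plugin's bundled template still maps fields with the legacy string type, which Elasticsearch 6.x rejects ("No handler for type [string] declared on field [@version]"). For reference, here is a minimal sketch of what an ES 6.x-compatible template.json could look like; the index_patterns value is my assumption, chosen to match the index from the question, so adjust it to your needs:
{
  "index_patterns": ["test-migrate*"],
  "settings": {
    "index.refresh_interval": "5s"
  },
  "mappings": {
    "_default_": {
      "dynamic_templates": [
        {
          "string_fields": {
            "match": "*",
            "match_mapping_type": "string",
            "mapping": {
              "type": "text",
              "norms": false,
              "fields": {
                "keyword": { "type": "keyword", "ignore_above": 256 }
              }
            }
          }
        }
      ],
      "properties": {
        "@timestamp": { "type": "date" },
        "@version": { "type": "keyword" }
      }
    }
  }
}
The key change versus the bundled template is that every legacy string/not_analyzed mapping becomes text or keyword, the types ES 6.x actually supports.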

Related

No response from remote for outbound association. Handshake timed out after [15000 ms] Error in Akka Clustering

I have a 3-node Akka cluster, with 3 actors running on each node. The cluster runs fine for about 2 hours, but after that I get the following warnings:
[INFO] [06/07/2018 15:08:51.923] [ClusterSystem-akka.remote.default-remote-dispatcher-6] [akka.tcp://ClusterSystem@192.168.2.8:2552/system/transports/akkaprotocolmanager.tcp0/akkaProtocol-tcp%3A%2F%2FClusterSystem%40192.168.2.7%3A2552-112] No response from remote for outbound association. Handshake timed out after [15000 ms].
[WARN] [06/07/2018 15:08:51.923] [ClusterSystem-akka.remote.default-remote-dispatcher-18] [akka.tcp://ClusterSystem@192.168.2.8:2552/system/endpointManager/reliableEndpointWriter-akka.tcp%3A%2F%2FClusterSystem%40192.168.2.7%3A2552-8] Association with remote system [akka.tcp://ClusterSystem@192.168.2.7:2552] has failed, address is now gated for [5000] ms. Reason: [Association failed with [akka.tcp://ClusterSystem@192.168.2.7:2552]] Caused by: [No response from remote for outbound association. Handshake timed out after [15000 ms].]
[WARN] [06/07/2018 16:07:06.347] [ClusterSystem-akka.actor.default-dispatcher-101] [akka.remote.PhiAccrualFailureDetector@3895fa5b] heartbeat interval is growing too large: 2839 millis
Edit: the Akka Cluster Management response from the API:
{
  "selfNode": "akka.tcp://ClusterSystem@127.0.0.1:2551",
  "leader": "akka.tcp://ClusterSystem@127.0.0.1:2551",
  "oldest": "akka.tcp://ClusterSystem@127.0.0.1:2551",
  "unreachable": [
    {
      "node": "akka.tcp://ClusterSystem@127.0.0.1:2552",
      "observedBy": [
        "akka.tcp://ClusterSystem@127.0.0.1:2551",
        "akka.tcp://ClusterSystem@127.0.0.1:2560"
      ]
    }
  ],
  "members": [
    {
      "node": "akka.tcp://ClusterSystem@127.0.0.1:2551",
      "nodeUid": "105742380",
      "status": "Up",
      "roles": [
        "Frontend",
        "dc-default"
      ]
    },
    {
      "node": "akka.tcp://ClusterSystem@127.0.0.1:2552",
      "nodeUid": "-150160059",
      "status": "Up",
      "roles": [
        "RuleExecutor",
        "dc-default"
      ]
    },
    {
      "node": "akka.tcp://ClusterSystem@127.0.0.1:2560",
      "nodeUid": "-158907672",
      "status": "Up",
      "roles": [
        "RuleExecutor",
        "dc-default"
      ]
    }
  ]
}
Edit 1: cluster setup configuration and failure detector configuration:
cluster {
  jmx.multi-mbeans-in-same-jvm = on
  roles = ["Frontend"]
  seed-nodes = [
    "akka.tcp://ClusterSystem@192.168.2.9:2551"]
  auto-down-unreachable-after = off
  failure-detector {
    # FQCN of the failure detector implementation.
    # It must implement akka.remote.FailureDetector and have
    # a public constructor with a com.typesafe.config.Config and
    # akka.actor.EventStream parameter.
    implementation-class = "akka.remote.PhiAccrualFailureDetector"
    # How often keep-alive heartbeat messages should be sent to each connection.
    # heartbeat-interval = 10 s
    # Defines the failure detector threshold.
    # A low threshold is prone to generate many wrong suspicions but ensures
    # a quick detection in the event of a real crash. Conversely, a high
    # threshold generates fewer mistakes but needs more time to detect
    # actual crashes.
    threshold = 18.0
    # Number of the samples of inter-heartbeat arrival times to adaptively
    # calculate the failure timeout for connections.
    max-sample-size = 1000
    # Minimum standard deviation to use for the normal distribution in
    # AccrualFailureDetector. Too low standard deviation might result in
    # too much sensitivity for sudden, but normal, deviations in heartbeat
    # inter arrival times.
    min-std-deviation = 100 ms
    # Number of potentially lost/delayed heartbeats that will be
    # accepted before considering it to be an anomaly.
    # This margin is important to be able to survive sudden, occasional,
    # pauses in heartbeat arrivals, due to for example garbage collect or
    # network drop.
    acceptable-heartbeat-pause = 15 s
    # Number of member nodes that each member will send heartbeat messages to,
    # i.e. each node will be monitored by this number of other nodes.
    monitored-by-nr-of-members = 2
    # After the heartbeat request has been sent the first failure detection
    # will start after this period, even though no heartbeat message has
    # been received.
    expected-response-after = 10 s
  }
}
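One knob worth checking (my suggestion, not something from the original post): the "Handshake timed out after [15000 ms]" message corresponds to the classic-remoting handshake timeout, which defaults to 15 s. A minimal sketch, assuming classic Akka remoting over TCP:
akka {
  remote {
    # Default is 15 s; raising it gives slow or congested links more time
    # to complete the association handshake. This only treats the symptom,
    # so also investigate GC pauses and network health (note the
    # "heartbeat interval is growing too large" warning above).
    handshake-timeout = 30 s
  }
}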

AWS mounting old volume from old instance to new instance

I detached the old volume from the old instance and attached it to the new instance in the AWS console,
following this question: Add EBS to Ubuntu EC2 Instance
When I run 'sudo mount /vol',
it shows me this error:
mount: wrong fs type, bad option, bad superblock on /dev/xvdf,
missing codepage or helper program, or other error
In some cases useful info is found in syslog - try
dmesg | tail or so.
The output of 'dmesg | tail' is below
[    9.158108] audit: type=1400 audit(1481970181.964:8): apparmor="STATUS" operation="profile_load" profile="unconfined" name="/usr/lib/NetworkManager/nm-dhcp-helper" pid=705 comm="apparmor_parser"
[    9.158434] audit: type=1400 audit(1481970181.964:9): apparmor="STATUS" operation="profile_load" profile="unconfined" name="/usr/lib/connman/scripts/dhclient-script" pid=705 comm="apparmor_parser"
[    9.178292] audit: type=1400 audit(1481970181.984:10): apparmor="STATUS" operation="profile_load" profile="unconfined" name="/usr/bin/lxc-start" pid=761 comm="apparmor_parser"
[    9.341874] audit: type=1400 audit(1481970182.148:11): apparmor="STATUS" operation="profile_load" profile="unconfined" name="/usr/lib/lxd/lxd-bridge-proxy" pid=763 comm="apparmor_parser"
[   11.673698] random: nonblocking pool is initialized
[   11.766032] EXT4-fs (xvda1): resizing filesystem from 2094474 to 2095139 blocks
[   11.766371] EXT4-fs (xvda1): resized filesystem to 2095139
[   12.716500] cgroup: new mount options do not match the existing superblock, will be ignored
[  236.029463] blkfront: xvdf: barrier or flush: disabled; persistent grants: disabled; indirect descriptors: enabled;
[  236.038716] xvdf: xvdf1
The old volume's attachment information in the AWS console is below:
VOLUME_ID (NEW_INSTANCE_NAME):/dev/sdf (attached)
Your volume has a partition table, as evidenced by...
[ 236.038716] xvdf: xvdf1
...so you need to mount the partition, not the volume.
sudo mount /dev/xvdf1 /path/to/mount-point
You can also see this using lsblk.
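For example (a quick sketch; /vol is the mount point from the question and xvdf1 comes from the dmesg output above):
lsblk                        # the partition shows up as xvdf1, a child of disk xvdf
sudo mkdir -p /vol           # create the mount point if it doesn't exist yet
sudo mount /dev/xvdf1 /vol   # mount the partition, not the bare device
df -h /vol                   # confirm the filesystem is mounted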

Can't run dev web site using python manage.py

I can't run the app from the command line after adding more configuration to my settings.py file:
python manage.py runserver
usage: manage.py [-h] [--config-dir DIR] [--config-file PATH] [--version]
manage.py: error: unrecognized arguments: runserver
This is configloader.py:
from oslo_config import cfg
# from oslo.config import cfg

rabbitmq_opt_group = cfg.OptGroup(name='rabbitmq', title='Configuration for rabbitmq')
rabbitmq_opts = [
    cfg.StrOpt('server_ip', default='127.0.0.1', help='ip of rabbitmq server'),
    cfg.IntOpt('server_port', default=5672, help='port of rabbitmq server'),
    cfg.FloatOpt('retry_time', default=0.2, help='interval for retry connect to server'),
    cfg.FloatOpt('interval_increase', default=0.2, help='increase unit after connect to server fail'),
    cfg.IntOpt('max_increase', default=10, help='Max sleep time when try to connect to server'),
    cfg.StrOpt('username', default='guest', help='username of account to connect rabbitmq server'),
    cfg.StrOpt('password', default='guest', help='password of account to connect rabbitmq server'),
]
worker_opt_group = cfg.OptGroup(name='worker', title='Configuration of worker')
worker_opts = [
    cfg.IntOpt('max_worker', default='10', help='max worker of service'),
    cfg.IntOpt('qos_worker', default='50', help='Max message can consumer by worker in concurrently'),
    cfg.StrOpt('queue_name', default='CTL_MJPEG', help='Listening queue name')
]
keep_alive_group = cfg.OptGroup(name='keepaliveworker', title='Configuration of keep alive worker')
keep_alive_opts = [
    cfg.IntOpt('max_worker', default='10', help='max worker of keep alive service'),
    cfg.IntOpt('qos_worker', default='50', help='Max message can consumer by worker in concurrently'),
    cfg.StrOpt('queue_name', default='CTL_MJPEG_RECOVERY', help='listening queue name')
]
monitor_queue_group = cfg.OptGroup(name='queuemonitor', title='Configuration of queue monitor')
monitor_queue_opts = [
    cfg.IntOpt('max_worker', default='1', help='max worker of keep alive service'),
    cfg.StrOpt('queue_name', default='MONITOR_QUEUE', help='Queue name using receiver event'),
    cfg.IntOpt('qos_worker', default='50', help='Max message can consumer by worker in concurrently'),
    cfg.StrOpt('monitor_topic', default='queue.*',
               help='Monitor queue when queue have been deleted(recovery function)'),
]
log_group = cfg.OptGroup(name='logcfg', title='Log Configuration of queue monitor')
log_opts = [
    cfg.StrOpt('log_cfg_dir', default='/etc/cloudmjpeg/log.conf.d', help='Directory save log config'),
    cfg.StrOpt('monitor_log', help='log configuration for monitor server'),
    cfg.StrOpt('worker_log', help='log configuration for monitor server'),
    cfg.StrOpt('queue_monitor_log', help='log configuration for queue monitor server'),
    cfg.StrOpt('keep_alive_log', help='log configuration for monitor server'),
]
portal_group = cfg.OptGroup(name='portal', title='Configuration about interact with portal')
portal_opts = [
    cfg.BoolOpt('send_file_info', default=False, help='Enable send file info to portal'),
]
alarming_group = cfg.OptGroup(name='alarming', title='Configuration about alarming to portal to send mail to customer')
alarming_opts = [
    cfg.BoolOpt('file_size', default=False, help='Enable alarming for file size'),
    cfg.BoolOpt('camera_status_change', default=False, help='Enable alarming when status of camera change')
]
monitor_group = cfg.OptGroup(name='monitor', title='Configuration using keep alive data')
monitor_opts = [
    cfg.IntOpt('check_interval', default=60, help='Interval check data'),
    cfg.StrOpt('email_subject', default='Keep Alive Monitor', help='Subject of Email send to admin'),
    cfg.IntOpt('check_alive', default=60, help='If start and end time have interval is check alive, then worker died')
]
ffserver_group = cfg.OptGroup(name='ffserver', title='Configuration for ffserver')
ffserver_opts = [
    cfg.IntOpt(name='ffm_file_size', default=500, help='Size of ffm temp. Unit kilo bytes'),
    cfg.StrOpt(name='ffm_dir', default='/tmp/ffmpeg-temp/', help='FFm temp file location'),
]

def parser(conf):
    CONF = cfg.CONF
    CONF.register_group(rabbitmq_opt_group)
    CONF.register_opts(rabbitmq_opts, rabbitmq_opt_group)
    CONF.register_group(worker_opt_group)
    CONF.register_opts(worker_opts, worker_opt_group)
    CONF.register_group(keep_alive_group)
    CONF.register_opts(keep_alive_opts, keep_alive_group)
    CONF.register_group(monitor_queue_group)
    CONF.register_opts(monitor_queue_opts, monitor_queue_group)
    CONF.register_group(log_group)
    CONF.register_opts(log_opts, log_group)
    CONF.register_group(portal_group)
    CONF.register_opts(portal_opts, portal_group)
    CONF.register_group(alarming_group)
    CONF.register_opts(alarming_opts, alarming_group)
    CONF.register_group(monitor_group)
    CONF.register_opts(monitor_opts, monitor_group)
    CONF.register_group(ffserver_group)
    CONF.register_opts(ffserver_opts, ffserver_group)
    CONF(default_config_files=conf)
    return CONF

def get_configuration():
    CONF = parser(['/etc/cloudmjpeg/cloudmjpeg.conf'])
    return CONF
I add my configuration to settings.py, which loads it from /etc/cloudmjpeg/cloudmjpeg.conf:
from cloudmjpeg.utils import configloader
MJPEG_CONF = configloader.get_configuration()
However, if I remove the configuration loading from settings.py, the app runs perfectly with the python manage.py command line. So I think the error happens when I load my configuration in settings.py. Why? I have no idea how to resolve this problem. Please help me.
Thanks
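A likely explanation (my guess from the symptom, not confirmed in this thread): calling CONF(...) without an args parameter makes oslo.config parse sys.argv, so it chokes on Django's runserver argument. Passing an explicit empty argument list restricts it to the config file, a minimal sketch:
def parser(conf):
    CONF = cfg.CONF
    # ... same register_group / register_opts calls as above ...
    # args=[] stops oslo.config from reading sys.argv, so Django's own
    # command-line arguments (e.g. "runserver") no longer reach it.
    CONF(args=[], default_config_files=conf)
    return CONF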

Why does this use of DSC File Resource using shared folder only succeed once?

I am testing the use of the DSC File resource for copying a directory of files from a shared folder to another machine.
My problem is that this works once but running the same code a second time fails. If I restart the target machine the script will again run correctly but fail a second time.
Can anyone tell me why this is and whether I need to be doing something differently?
The machines I am using are called:
"S1" => Server 2012 R2 (This has the shared folder and user setup for read access)
"S2" => Virtual Server 2012 R2 running on S1 (This is the target machine)
The script I am running is this:
$ConfigurationData = @{
    AllNodes = @(
        @{
            NodeName = "*"
            PSDscAllowPlainTextPassword = $true
        }
        @{
            NodeName = "S2"
        }
    )
}
Configuration Test {
    param (
        [Parameter(Mandatory=$true)]
        [PSCredential]$credential
    )
    Node $AllNodes.NodeName {
        File DirectoryCopy {
            DestinationPath = "C:\Shared\Files"
            SourcePath = "\\S1\Shared\Files"
            Ensure = "present"
            Credential = $credential
            Type = "Directory"
            Recurse = $true
        }
    }
}
$username = "dscUser"
$password = "dscPassword!" | ConvertTo-SecureString -AsPlainText -Force
$credential = New-Object System.Management.Automation.PSCredential($username, $password)
Test -OutputPath "C:\Scripts" -ConfigurationData $ConfigurationData -Credential $credential
Start-DscConfiguration -ComputerName S2 -Path "C:\Scripts" -Verbose -Wait
The output of running this twice is:
PS C:\repo> C:\Scripts\Test.ps1
Directory: C:\Scripts
Mode LastWriteTime Length Name
---- ------------- ------ ----
-a--- 16/10/2015 11:12 1646 S2.mof
VERBOSE: Perform operation 'Invoke CimMethod' with following parameters, ''methodName' = SendConfigurationApply,'className' = MSFT_DSCLocalConfigurationManager,'namespaceName' = root/Microsoft/Windows/DesiredStateConfiguration'.
VERBOSE: An LCM method call arrived from computer S1 with user sid S-1-5-21-1747786857-595474378-2516325245-500.
VERBOSE: [S2]: LCM: [ Start Set ]
VERBOSE: [S2]: LCM: [ Start Resource ] [[File]DirectoryCopy]
VERBOSE: [S2]: LCM: [ Start Test ] [[File]DirectoryCopy]
VERBOSE: [S2]: [[File]DirectoryCopy] Building file list from cache.
VERBOSE: [S2]: LCM: [ End Test ] [[File]DirectoryCopy] in 0.2500 seconds.
VERBOSE: [S2]: LCM: [ Start Set ] [[File]DirectoryCopy]
VERBOSE: [S2]: [[File]DirectoryCopy] Building file list from cache.
VERBOSE: [S2]: LCM: [ End Set ] [[File]DirectoryCopy] in 0.2660 seconds.
VERBOSE: [S2]: LCM: [ End Resource ] [[File]DirectoryCopy]
VERBOSE: [S2]: LCM: [ End Set ]
VERBOSE: [S2]: LCM: [ End Set ] in 0.6720 seconds.
VERBOSE: Operation 'Invoke CimMethod' complete.
VERBOSE: Time taken for configuration job to complete is 1.59 seconds
PS C:\repo> C:\Scripts\Test.ps1
Directory: C:\Scripts
Mode LastWriteTime Length Name
---- ------------- ------ ----
-a--- 16/10/2015 11:13 1646 S2.mof
VERBOSE: Perform operation 'Invoke CimMethod' with following parameters, ''methodName' = SendConfigurationApply,'className' = MSFT_DSCLocalConfigurationManager,'namespaceName' = root/Microsoft/Windows/DesiredStateConfiguration'.
VERBOSE: An LCM method call arrived from computer S1 with user sid S-1-5-21-1747786857-595474378-2516325245-500.
VERBOSE: [S2]: LCM: [ Start Set ]
VERBOSE: [S2]: LCM: [ Start Resource ] [[File]DirectoryCopy]
VERBOSE: [S2]: LCM: [ Start Test ] [[File]DirectoryCopy]
VERBOSE: [S2]: [[File]DirectoryCopy] An error occurs when accessing the network share with the specified credential. Please make sure the credential is correct and the network share is accessible. Note that Credential should not be specified with the local path.
VERBOSE: [S2]: [[File]DirectoryCopy] The related file/directory is: \\S1\Shared\Files.
A specified logon session does not exist. It may already have been terminated. An error occurs when accessing the network share with the specified credential. Please make sure the credential is correct and the network share is accessible. Note that Credential should not be specified with the local path. The related file/directory is: \\S1\Shared\Files.
+ CategoryInfo : NotSpecified: (:) [], CimException
+ FullyQualifiedErrorId : Windows System Error 1312
+ PSComputerName : S2
VERBOSE: [S2]: LCM: [ End Set ]
LCM failed to move one or more resources to their desired state.
+ CategoryInfo : NotSpecified: (root/Microsoft/...gurationManager:String) [], CimException
+ FullyQualifiedErrorId : MI RESULT 1
+ PSComputerName : S2
VERBOSE: Operation 'Invoke CimMethod' complete.
VERBOSE: Time taken for configuration job to complete is 3.027 seconds
Any help with this is appreciated as it's driving me nuts.
Thanks.
I think I have found the answer.
When specifying the username, I should have used 'S1\dscUser' instead of 'dscUser'.
These machines are not in a domain.
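Based on that, the credential section of the script above becomes (same placeholder password as in the question):
$username = "S1\dscUser"    # machine-qualified local account, since there is no domain
$password = "dscPassword!" | ConvertTo-SecureString -AsPlainText -Force
$credential = New-Object System.Management.Automation.PSCredential($username, $password)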

celerybeat automatically disables periodic task

I'd like to create a periodic task for celery using django-celery's admin interface. I have a task set up which runs great when called manually or by script. It just doesn't work through celerybeat. According to the debug logs the task is set to enabled = False on first retrieval and I wonder why.
When adding the periodic task and passing [1, False] as positional arguments, the task is automatically disabled and I don't see any further output. When added without arguments the task is executed but raises an exception instantly because I didn't supply the needed arguments (makes sense).
Does anyone see what's the problem here?
Thanks in advance.
This is the output after supplying arguments:
[DEBUG/Beat] SELECT "djcelery_periodictask"."id", [...]
FROM "djcelery_periodictask"
WHERE "djcelery_periodictask"."enabled" = true ; args=(True,)
[DEBUG/Beat] SELECT "djcelery_intervalschedule"."id", [...]
FROM "djcelery_intervalschedule"
WHERE "djcelery_intervalschedule"."id" = 3 ; args=(3,)
[DEBUG/Beat] SELECT (1) AS "a"
FROM "djcelery_periodictask"
WHERE "djcelery_periodictask"."id" = 3 LIMIT 1; args=(3,)
[DEBUG/Beat] UPDATE "djcelery_periodictask"
SET "name" = E'<taskname>', "task" = E'<task.module.path>',
"interval_id" = 3, "crontab_id" = NULL,
"args" = E'[1, False,]', "kwargs" = E'{}', "queue" = NULL,
"exchange" = NULL, "routing_key" = NULL,
"expires" = NULL, "enabled" = false,
"last_run_at" = E'2011-05-25 00:45:23.242387', "total_run_count" = 9,
"date_changed" = E'2011-05-25 09:28:06.201148'
WHERE "djcelery_periodictask"."id" = 3;
args=(
u'<periodic-task-name>', u'<task.module.path>',
3, u'[1, False,]', u'{}',
False, u'2011-05-25 00:45:23.242387', 9,
u'2011-05-25 09:28:06.201148', 3
)
[DEBUG/Beat] Current schedule:
<ModelEntry: celery.backend_cleanup celery.backend_cleanup(*[], **{}) {<crontab: 0 4 * (m/h/d)>}
[DEBUG/Beat] Celerybeat: Waking up in 5.00 seconds.
EDIT:
It works with the following setting. I still have no idea why it doesn't work with django-celery.
CELERYBEAT_SCHEDULE = {
    "example": {
        "task": "<task.module.path>",
        "schedule": crontab(),
        "args": (1, False)
    },
}
I had the same issue. Make sure the arguments are JSON formatted. For example, try setting the positional args to [1, false] -- lowercase 'false' -- I just tested it on a django-celery instance (version 2.2.4) and it worked.
For the keyword args, use something like {"name": "aldarund"}
I got the same problem too. Following the description on the PeriodicTask model in djcelery ("JSON encoded positional arguments"), and in line with Evan's answer, I used Python's json library to encode the arguments before saving. This worked for me:
import json
o = PeriodicTask()
o.kwargs = json.dumps({'myargs': 'hello'})
o.save()
celery version 3.0.11
CELERYBEAT_SCHEDULE = {
    "example": {
        "task": "<task.module.path>",
        "schedule": crontab(),
        "enable": False
    },
}
I tried this and it worked. I'm running celery beat v5.1.2.