Is the following akka.conf file valid?

I am using OpenDaylight and trying to replace the default distributed database with Apache Ignite.
I am using the jar built from the source code here:
https://github.com/Romeh/akka-persistance-ignite
However, the class IgniteWriteJournal does not seem to load, which I have checked by putting some print statements in its constructor.
Is there any issue with the .conf file?
The following is a portion of the akka.conf file I am using in OpenDaylight.
odl-cluster-data {
  akka {
    remote {
      artery {
        enabled = off
        canonical.hostname = "10.145.59.38"
        canonical.port = 2550
      }
      netty.tcp {
        hostname = "10.145.59.38"
        port = 2550
      }
      # when under load we might trip a false positive on the failure detector
      # transport-failure-detector {
      #   heartbeat-interval = 4 s
      #   acceptable-heartbeat-pause = 16s
      # }
    }

    cluster {
      # Remove ".tcp" when using artery.
      seed-nodes = ["akka.tcp://opendaylight-cluster-data@10.145.59.38:2550"]
      roles = ["member-1"]
    }

    extensions = ["akka.persistence.ignite.extension.IgniteExtensionProvider"]
    akka.persistence.journal.plugin = "akka.persistence.journal.ignite"
    akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot.ignite"

    persistence {
      # Ignite journal plugin
      journal {
        ignite {
          # Class name of the plugin
          class = "akka.persistence.ignite.journal.IgniteWriteJournal"
          cache-prefix = "akka-journal"
          // Should be based on the data grid topology
          cache-backups = 1
          // if Ignite is already started in a separate standalone grid where the journal cache is already created
          cachesAlreadyCreated = false
        }
      }
      # Ignite snapshot plugin
      snapshot {
        ignite {
          # Class name of the plugin
          class = "akka.persistence.ignite.snapshot.IgniteSnapshotStore"
          cache-prefix = "akka-snapshot"
          // Should be based on the data grid topology
          cache-backups = 1
          // if Ignite is already started in a separate standalone grid where the snapshot cache is already created
          cachesAlreadyCreated = false
        }
      }
    }
  }

  ignite {
    // whether to start a client or a server node when connecting to the Ignite data cluster
    isClientNode = false
    // for ONLY testing we use localhost
    // used for grid cluster connectivity
    tcpDiscoveryAddresses = "localhost"
    metricsLogFrequency = 0
    // thread pools used by Ignite; should be based on the target machine specs
    queryThreadPoolSize = 4
    dataStreamerThreadPoolSize = 1
    managementThreadPoolSize = 2
    publicThreadPoolSize = 4
    systemThreadPoolSize = 2
    rebalanceThreadPoolSize = 1
    asyncCallbackPoolSize = 4
    peerClassLoadingEnabled = false
    // to enable or disable durable memory persistence
    enableFilePersistence = true
    // used for grid cluster connectivity, change it to suit your configuration
    igniteConnectorPort = 11211
    // used for grid cluster connectivity, change it to suit your configuration
    igniteServerPortRange = "47500..47509"
    // durable memory persistence storage file system path, change it to suit your configuration
    ignitePersistenceFilePath = "./data"
  }
}

I assume you modified configuration/initial/akka.conf. First, those sections need to be inside the odl-cluster-data section (I can't tell from just your snippet). Also, it looks like the following should be:
akka.persistence.journal.plugin = "akka.persistence.journal.ignite"
akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot.ignite"
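For what it's worth, my reading of that advice (an assumption on my part, based on how HOCON nests prefixed keys): written inside the akka { } block as in your snippet, those two keys resolve to akka.akka.persistence.journal.plugin and akka.akka.persistence.snapshot-store.plugin, so the plugin section is never found. Dropping the akka. prefix inside the block would give:
odl-cluster-data {
  akka {
    persistence {
      journal.plugin = "akka.persistence.journal.ignite"
      snapshot-store.plugin = "akka.persistence.snapshot.ignite"
    }
  }
}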

Related

cloneVM_Task configure cloned VM to another distributed virtual port

How do I use VirtualMachineConfigSpec to set a device change and connect my new clone to a different distributed virtual port? My cloned VM's network adapter type is VirtualVmxnet3, and I would like to attach it to the 10.xx.xxx.xx-x_vm_ddc (dv port name) dv switch upon a successful clone.
The following clone method takes a source VM (powered off) as the template for the new clone:
public ResponseEntity<?> cloneRhcosVm(@PathVariable String vcenter, @RequestBody VmClone vmClone) {
    VirtualMachine vm = null;
    VirtualMachineCloneSpec cloneSpec = new VirtualMachineCloneSpec();
    VirtualMachineConfigSpec configSpec = new VirtualMachineConfigSpec();
    VirtualMachineRelocateSpec relocSpec = new VirtualMachineRelocateSpec();
    VirtualMachineBootOptions boot = new VirtualMachineBootOptions();
    VirtualMachineBootOptionsBootableCdromDevice isoBoot = new VirtualMachineBootOptionsBootableCdromDevice();
    VirtualMachineBootOptionsBootableDevice[] bootOrder = new VirtualMachineBootOptionsBootableDevice[]{isoBoot};
    List<VirtualDeviceConfigSpec> vdSpecAll = new ArrayList<VirtualDeviceConfigSpec>();
    VirtualDeviceConfigSpec[] vdSpecArray = new VirtualDeviceConfigSpec[]{};
    try {
        String sourceVmName = vmClone.getSourceVmName();
        String cloneVmName = vmClone.getTargetVmName();
        vm = vmService.getVirtualMachine(vcenter, sourceVmName);
        int cloneVmCPUs = vmClone.getTargetVmCPUs();
        long cloneVmMemoryMB = (long) vmClone.getTargetVmMemoryGB() * Constants.MB_TO_GB;
        String cloneVmNicName = vmClone.getTargetVmNicName();
        boot.setBootOrder(bootOrder);
        // Get target nic (dv switch) virtual device
        VirtualDeviceConfigSpec nicSpec = getTargetNicVDConfigSpec(vm, cloneVmNicName);
        if (nicSpec != null) {
            vdSpecAll.add(nicSpec);
        }
        vdSpecArray = vdSpecAll.toArray(new VirtualDeviceConfigSpec[vdSpecAll.size()]);
        configSpec.setDeviceChange(vdSpecArray);
        configSpec.setName(cloneVmName);
        configSpec.setNumCPUs(cloneVmCPUs);
        configSpec.setNumCoresPerSocket(cloneVmCPUs);
        configSpec.setMemoryMB(cloneVmMemoryMB);
        configSpec.setBootOptions(boot);
        cloneSpec.setConfig(configSpec);
        cloneSpec.setLocation(relocSpec);
        cloneSpec.setPowerOn(true);
        cloneSpec.setTemplate(false);
        Task task = vm.cloneVM_Task((Folder) vm.getParent(), cloneVmName, cloneSpec);
        TaskInfo taskInfo = task.getTaskInfo();
        ...
}
The following method is used to configure the new NIC virtual device; targetNicName is the new 10.xx.xxx.xx-x_vm_ddc I want to set after cloning:
private VirtualDeviceConfigSpec getTargetNicVDConfigSpec(VirtualMachine vm, String targetNicName) {
    VirtualDevice[] vds = vm.getConfig().getHardware().getDevice();
    VirtualDeviceConfigSpec nicSpec = new VirtualDeviceConfigSpec();
    VirtualDeviceConnectInfo deviceConnInfo = new VirtualDeviceConnectInfo();
    String adapter1 = "Network adapter 1";
    for (int i = 0; i < vds.length; i++) {
        if ((vds[i] instanceof VirtualEthernetCard) && (vds[i].getDeviceInfo().getLabel().equalsIgnoreCase(adapter1))) {
            VirtualEthernetCard nic = (VirtualEthernetCard) vds[i];
            VirtualEthernetCardNetworkBackingInfo nicBacking = (VirtualEthernetCardNetworkBackingInfo) nic.getBacking();
            deviceConnInfo.setConnected(true);
            nicBacking.setDeviceName(targetNicName);
            nic.setBacking(nicBacking);
            nic.setConnectable(deviceConnInfo);
            nicSpec.setOperation(VirtualDeviceConfigSpecOperation.edit);
            nicSpec.setDevice(nic);
            return nicSpec;
        }
    }
    return null;
}
The above code works, but the new network adapter is not being configured with the dvSwitch I was expecting by setting the new device name.
I found that my network adapter virtual device (vd) is of class VirtualVmxnet3 and the vd's backing info is of class VirtualEthernetCardDistributedVirtualPortBackingInfo; however, these classes do not have any method that works like .setDevice(nicName) to point the network adapter at the new dvSwitch by passing the name 10.xx.xxx.xx-x_vm_ddc.
Any clues if it's possible to clone and configure to another dv port?
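One possible direction (a hedged sketch, not verified against this environment; class and method names are from the vijava-style vSphere Java API, and the portgroup/switch lookups are left to the caller): when the NIC sits on a distributed virtual switch, the backing is keyed by a DistributedVirtualSwitchPortConnection (portgroup key plus switch UUID) rather than by a device name:
// Sketch: build a NIC edit spec that re-backs the adapter with a distributed
// virtual portgroup instead of a plain network name. The caller resolves pg and
// dvs (e.g. via an InventoryNavigator lookup of "10.xx.xxx.xx-x_vm_ddc").
private VirtualDeviceConfigSpec dvPortgroupNicSpec(VirtualEthernetCard nic,
                                                   DistributedVirtualPortgroup pg,
                                                   DistributedVirtualSwitch dvs) {
    DistributedVirtualSwitchPortConnection conn = new DistributedVirtualSwitchPortConnection();
    conn.setPortgroupKey(pg.getKey());   // key of the target dv portgroup
    conn.setSwitchUuid(dvs.getUuid());   // UUID of the owning dvSwitch

    VirtualEthernetCardDistributedVirtualPortBackingInfo backing =
            new VirtualEthernetCardDistributedVirtualPortBackingInfo();
    backing.setPort(conn);

    nic.setBacking(backing);             // replaces the VirtualEthernetCardNetworkBackingInfo
    VirtualDeviceConfigSpec nicSpec = new VirtualDeviceConfigSpec();
    nicSpec.setOperation(VirtualDeviceConfigSpecOperation.edit);
    nicSpec.setDevice(nic);
    return nicSpec;
}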

What happens internally when an akka.conf file is read?

I am using OpenDaylight and trying to replace the default distributed database with Apache Ignite.
I am using the jar built from the source code here:
https://github.com/Romeh/akka-persistance-ignite and deployed it in the OpenDaylight Karaf container.
The following is a portion of the akka.conf file I am using in OpenDaylight to replace the LevelDB journal with Apache Ignite.
odl-cluster-data {
  akka {
    loglevel = DEBUG
    actor {
      provider = "akka.cluster.ClusterActorRefProvider"
      default-dispatcher {
        # Configuration for the fork join pool
        fork-join-executor {
          # Min number of threads to cap factor-based parallelism number to
          parallelism-min = 2
          # Parallelism (threads) ... ceil(available processors * factor)
          parallelism-factor = 2.0
          # Max number of threads to cap factor-based parallelism number to
          parallelism-max = 10
        }
        # Throughput defines the maximum number of messages to be
        # processed per actor before the thread jumps to the next actor.
        # Set to 1 for as fair as possible.
        throughput = 10
      }
    }
    remote {
      log-remote-lifecycle-events = off
      netty.tcp {
        hostname = "10.145.59.44"
        port = 2551
      }
    }
    cluster {
      seed-nodes = [
        "akka.tcp://test@127.0.0.1:2551"
      ]
      min-nr-of-members = 1
      auto-down-unreachable-after = 30s
    }
    # Disable legacy metrics in akka-cluster.
    akka.cluster.metrics.enabled = off
    akka.persistence.journal.plugin = "akka.persistence.journal.ignite"
    akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot.ignite"
    extensions = ["akka.persistence.ignite.extension.IgniteExtensionProvider"]
    persistence {
      # Ignite journal plugin
      journal {
        ignite {
          # Class name of the plugin
          class = "akka.persistence.ignite.journal.IgniteWriteJournal"
          plugin-dispatcher = "ignite-dispatcher"
          cache-prefix = "akka-journal"
          // Should be based on the data grid topology
          cache-backups = 1
          // if Ignite is already started in a separate standalone grid where the journal cache is already created
          cachesAlreadyCreated = false
        }
      }
      # Ignite snapshot plugin
      snapshot {
        ignite {
          # Class name of the plugin
          class = "akka.persistence.ignite.snapshot.IgniteSnapshotStore"
          plugin-dispatcher = "ignite-dispatcher"
          cache-prefix = "akka-snapshot"
          // Should be based on the data grid topology
          cache-backups = 1
          // if Ignite is already started in a separate standalone grid where the snapshot cache is already created
          cachesAlreadyCreated = false
        }
      }
    }
  }
}
However, the class IgniteWriteJournal does not seem to load, which I have checked by putting some print statements in its constructor as follows.
public IgniteWriteJournal(Config config) throws NotSerializableException {
    System.out.println("!##$% inside IgniteWriteJournal constructor\n");
    ActorSystem actorSystem = context().system();
    serializer = SerializationExtension.get(actorSystem).serializerFor(PersistentRepr.class);
    storage = new Store<>(actorSystem);
    JournalCaches journalCaches = journalCacheProvider.apply(config, actorSystem);
    sequenceNumberTrack = journalCaches.getSequenceCache();
    cache = journalCaches.getJournalCache();
}
So what exactly happens to the class named under the akka.persistence.journal.ignite key? Does the constructor of that class get called? What exactly happens in the background when the akka.conf file is read?
Where are you looking for the print-outs - in data/log/karaf.log? System.out.println doesn't go there - use an org.slf4j.Logger.
How did you rebuild the IgniteWriteJournal source and deploy the new artifact? Are you sure your changes were actually deployed?
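As far as I understand Akka Persistence (reconstructing from the docs, not from this deployment): the class named under akka.persistence.journal.ignite.class is instantiated reflectively and lazily - the constructor only runs once the Persistence extension actually starts the plugin, which in turn only happens if akka.persistence.journal.plugin resolves to that section. A minimal debugging sketch combining the two comments above (slf4j and the Typesafe Config API are both available in Karaf; the class and method names here are mine):
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.typesafe.config.Config;

public class PersistenceConfigDebug {
    private static final Logger LOG = LoggerFactory.getLogger(PersistenceConfigDebug.class);

    // Call with context().system().settings().config() from the journal.
    // Unlike System.out.println, this output lands in data/log/karaf.log.
    public static void dump(Config config) {
        LOG.info("journal plugin key = {}",
                config.hasPath("akka.persistence.journal.plugin")
                        ? config.getString("akka.persistence.journal.plugin")
                        : "<not set>");
        // If this logs "<not set>" although akka.conf clearly sets it, check whether
        // the key nested as akka.akka.persistence.journal.plugin instead - an easy
        // mistake when prefixed keys are written inside the akka { } block.
        LOG.info("nested by accident? {}", config.hasPath("akka.akka.persistence.journal.plugin"));
    }
}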

com.typesafe.config.ConfigException$Missing: No configuration setting found for key 'akka.stream'

I'm trying to run an akka stream application, but I am getting an exception:
No configuration setting found for key 'akka.stream'
The relevant code snippet is:
ConfigFactory.load()
implicit val system = ActorSystem("svc")
implicit val mat = ActorMaterializer()
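(An aside, as a hedged sketch using the Typesafe Config and Akka Java APIs: one way to take -D flag handling out of the equation entirely is to parse the file explicitly and hand the result to the ActorSystem. Note that the ConfigFactory.load() result above is discarded; ActorSystem("svc") loads the default configuration itself, which is fine as long as the -D flag actually reaches the JVM. If akka.stream is still missing even with the sketch below, the fat jar may have been assembled without merging the libraries' reference.conf files, a common cause of this exact error.)
import java.io.File;
import akka.actor.ActorSystem;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

public class ConfigCheck {
    public static void main(String[] args) {
        // Parse the file from the question directly; no -D flags involved.
        Config config = ConfigFactory.parseFile(new File("/opt/myService/conf/application.conf"))
                .withFallback(ConfigFactory.defaultReference())  // merge reference.conf defaults
                .resolve();
        System.out.println(config.hasPath("akka.stream"));  // expect: true
        ActorSystem system = ActorSystem.create("svc", config);
    }
}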
I tried both command lines:
java -jar ./myService.jar -Dconfig.resource=/opt/myservice/conf/application.conf
java -jar ./myService.jar -Dconfig.file=/opt/myService/conf/application.conf
My application.conf file:
akka {
  event-handlers = ["akka.event.slf4j.Slf4jEventHandler"]
  loglevel = "DEBUG"
  actor {
  }
  stream {
    # Default materializer settings
    materializer {
      max-input-buffer-size = 16
      dispatcher = ""
      subscription-timeout {
        mode = cancel
        timeout = 5s
      }
      output-burst-limit = 1000
      auto-fusing = on
      max-fixed-buffer-size = 1000000000
      sync-processing-limit = 1000
    }
    blocking-io-dispatcher = "akka.stream.default-blocking-io-dispatcher"
    default-blocking-io-dispatcher {
      type = "Dispatcher"
      executor = "thread-pool-executor"
      throughput = 1
      thread-pool-executor {
        fixed-pool-size = 16
      }
    }
  }
}
exception details:
No configuration setting found for key 'akka.stream'
at com.typesafe.config.impl.SimpleConfig.findKeyOrNull(SimpleConfig.java:152)
at com.typesafe.config.impl.SimpleConfig.findKey(SimpleConfig.java:145)
at com.typesafe.config.impl.SimpleConfig.findOrNull(SimpleConfig.java:172)
at com.typesafe.config.impl.SimpleConfig.findOrNull(SimpleConfig.java:176)
at com.typesafe.config.impl.SimpleConfig.find(SimpleConfig.java:184)
at com.typesafe.config.impl.SimpleConfig.find(SimpleConfig.java:189)
at com.typesafe.config.impl.SimpleConfig.getObject(SimpleConfig.java:258)
at com.typesafe.config.impl.SimpleConfig.getConfig(SimpleConfig.java:264)
at com.typesafe.config.impl.SimpleConfig.getConfig(SimpleConfig.java:37)
at akka.stream.ActorMaterializerSettings$.apply(ActorMaterializer.scala:248)
at akka.stream.ActorMaterializer$$anonfun$1.apply(ActorMaterializer.scala:41)
at akka.stream.ActorMaterializer$$anonfun$1.apply(ActorMaterializer.scala:41)
at scala.Option.getOrElse(Option.scala:121)
at akka.stream.ActorMaterializer$.apply(ActorMaterializer.scala:41)
at com.Listener$.main(Listener.scala:41)
at com.Listener.main(Listener.scala)
Can you assist?
Thanks
To load config from a file, you should use:
-Dconfig.file=/opt/myService/conf/application.conf
Doc link: https://github.com/typesafehub/config#standard-behavior
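Two more details worth checking (facts about the JVM and Typesafe Config, not about this app specifically): -Dconfig.resource names a resource on the classpath, so an absolute filesystem path will not resolve there; and JVM system properties must come before -jar, otherwise they are passed to your main class as program arguments and the JVM never sees them. So the working invocation should look like:
java -Dconfig.file=/opt/myService/conf/application.conf -jar ./myService.jar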

Akka (.net) cluster with remote nodes: Disassociated exception

Using Akka (.NET) I am trying to implement a simple cluster use case.
Cluster - for nodes up/down events.
Remote - for sending message to specific node.
There are two actors: a Master Node, which listens for cluster events, and a Slave Node, which connects to the cluster.
Address address = new Address("akka.tcp", "ClusterSystem", "master", 8080);
cluster.Join(address);
When a ClusterEvent.MemberUp message is received, the Master Node creates an actor selection:
ClusterEvent.MemberUp up = message as ClusterEvent.MemberUp;
ActorSelection nodeActor = system.ActorSelection(up.Member.Address + "/user/slave_0");
Sending a message to this actor causes an error:
Association with remote system akka.tcp://ClusterSystem@slave:8090 has failed; address is now gated for 5000 ms. Reason is: [Disassociated]
Master config:
akka {
  actor {
    provider = "Akka.Cluster.ClusterActorRefProvider, Akka.Cluster"
  }
  remote {
    helios.tcp {
      port = 8080
      hostname = master
      bind-hostname = master
      bind-port = 8080
      send-buffer-size = 512000b
      receive-buffer-size = 512000b
      maximum-frame-size = 1024000b
      tcp-keepalive = on
    }
  }
  cluster {
    failure-detector {
      heartbeat-interval = 10 s
    }
    auto-down-unreachable-after = 10s
    gossip-interval = 5s
  }
  stdout-loglevel = DEBUG
  loglevel = DEBUG
  debug {
    receive = on
    autoreceive = on
    lifecycle = on
    event-stream = on
    unhandled = on
  }
}
Slave config:
akka {
  actor {
    provider = "Akka.Cluster.ClusterActorRefProvider, Akka.Cluster"
  }
  remote {
    helios.tcp {
      port = 8090
      hostname = slave
      bind-hostname = slave
      bind-port = 8090
      send-buffer-size = 512000b
      receive-buffer-size = 512000b
      maximum-frame-size = 1024000b
      tcp-keepalive = on
    }
  }
  cluster {
    failure-detector {
      heartbeat-interval = 10 s
    }
    auto-down-unreachable-after = 10s
    gossip-interval = 5s
  }
  stdout-loglevel = DEBUG
  loglevel = DEBUG
  debug {
    receive = on
    autoreceive = on
    lifecycle = on
    event-stream = on
    unhandled = on
  }
}
Here's your problem:
cluster {
  failure-detector {
    heartbeat-interval = 10 s
  }
  auto-down-unreachable-after = 10s
  gossip-interval = 5s
}
heartbeat-interval and auto-down-unreachable-after are the same duration - therefore your nodes will almost always disassociate automatically after 10s, because you're betting on a race condition that the failure detector might lose.
auto-down-unreachable-after is a dangerous setting - do not use it. You'll end up with a split brain or worse.
And make sure your failure detector interval is always lower than your auto-down interval.
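For illustration, a hedged sketch of the shape that advice implies (the values are examples to tune against your network, not recommendations):
cluster {
  failure-detector {
    # detect unreachability well before any downing decision is made
    heartbeat-interval = 1 s
  }
  # the default is off; prefer manual downing or a split-brain resolver
  auto-down-unreachable-after = off
  gossip-interval = 1s
}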

After changing a key value from Machine2, not getting the changed value from Machine1

I built a sample application and ran it from 2 different machines, where both applications use the AppFabric cache. I set pollInterval="120" (seconds) in both applications' config files with the settings below:
<localCache isEnabled="true"
            sync="NotificationBased"
            ttlValue="300"
            objectCount="10"/>
<!--(optional) specify cache notifications poll interval-->
<clientNotification pollInterval="120" />
I also enabled notifications on the cluster using PowerShell.
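(For reference, a sketch of what I mean - assuming the AppFabric Caching PowerShell module, and that the cluster is stopped while the setting is changed:)
Stop-CacheCluster
Set-CacheConfig -CacheName default -NotificationsEnabled $true
Start-CacheCluster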
Now from Machine1 I read the key called key1, whose value is "Value1".
Then from Machine2 I changed the value of key1 to "Changed".
Then from Machine2 I read key1, whose value is now displayed as "Changed".
Then, after the poll interval of 2 minutes, I read key1 from Machine1, and its value is still displayed as "Value1". Why is it not displaying "Changed"?
Why is the change not detected by the application on Machine1? Why is the local cache invalidation not occurring?
@Ahmed Ilyas:
Show the code you are using to read and write to the cache. You also have not explained how you configured AppFabric and these machines. Are they joined to the cluster?
I have done reading through the AFC read-through API, which is implemented in a separate project. Writing to the cache is done just by the Put() method. As this is a sample project, I thought there was no need to update the database, only the cache cluster.
The above config settings are for each application running on the 2 machines.
I have allowed access for these 2 machines by granting them access in the cache cluster. One machine is both the AFC server & a cache client (i.e. Machine1).
Hope this helps you to answer. Find the code below:
public class CacheUtil
{
    private static DataCacheFactory _factory = null;
    private static DataCache _cache = null;

    static CacheUtil()
    {
        if (_cache == null)
        {
            // Declare array for cache host(s).
            DataCacheServerEndpoint[] servers = new DataCacheServerEndpoint[1];
            servers[0] = new DataCacheServerEndpoint("H1011.hoboo.net", 22233);

            // Set the local cache properties. In this example, it
            // is timeout-based with a timeout of 300 seconds (5 minutes).
            DataCacheLocalCacheProperties localCacheConfig;
            TimeSpan localTimeout = new TimeSpan(0, 5, 0);
            localCacheConfig = new DataCacheLocalCacheProperties(60, localTimeout, DataCacheLocalCacheInvalidationPolicy.TimeoutBased);

            // Setup the DataCacheFactory configuration.
            // NOTE: the assignments below are commented out, so the factory is built
            // with the defaults from app.config, not with localCacheConfig.
            DataCacheFactoryConfiguration factoryConfig = new DataCacheFactoryConfiguration();
            //factoryConfig.ChannelOpenTimeout = new TimeSpan(0, 0, 0);
            //factoryConfig.Servers = servers;
            //factoryConfig.LocalCacheProperties = localCacheConfig;

            _factory = new DataCacheFactory();
            //_factory = new DataCacheFactory(factoryConfig);
            _cache = _factory.GetCache("default");
        }
    }

    public static DataCache GetCache()
    {
        if (_cache != null) return _cache;
        try
        {
            RuntimeContext.WriteAppFabricErrorLog(new AppFabricLogger()
            {
                CacheKey = "Connected to AppFabric Cache Server.",
                CacheData = "Connected to AppFabric Cache Server.",
                ErrorString = "Connected to AppFabric Cache Server."
            });
        }
        catch (Exception ex)
        {
            // Suppress error
        }
        return _cache;
    }
}
Another class, which has Get():
public static object Get(string pName)
{
    object cachedItem = null;
    try
    {
        // Check configuration settings for AppFabric.
        bool appFabricCache;
        bool.TryParse(System.Configuration.ConfigurationManager.AppSettings["AppFabricCache"], out appFabricCache);
        if (appFabricCache)
        {
            // Get data from AppFabric Cache Server.
            cachedItem = CacheUtil.GetCache().Get(pName);
        }
        else
        {
            // Get data from Local Cache Server.
            cachedItem = RuntimeContextOlderVersion.Get(pName);
        }
    }
    catch (Exception Ex)
    {
        // If it fails, write the reason to the log file.
        WriteAppFabricErrorLog(new AppFabricLogger()
        {
            CacheKey = pName,
            CacheData = "Get Method",
            ErrorString = Ex.ToString()
        });
    }
    return cachedItem;
}
@stuartd
Yes, I have enabled notifications. You can see that in my app config.
For Stuart:
PS C:\Windows\system32> get-cacheconfig
cmdlet Get-CacheConfig at command pipeline position 1
Supply values for the following parameters:
CacheName: default

CacheName                : default
TimeToLive               : 10 mins
CacheType                : Partitioned
Secondaries              : 0
MinSecondaries           : 0
IsExpirable              : True
EvictionType             : LRU
NotificationsEnabled     : True
WriteBehindEnabled       : False
WriteBehindInterval      : 300
WriteBehindRetryInterval : 60
WriteBehindRetryCount    : -1
ReadThroughEnabled       : True
ProviderType             : SampleProvider.Provider, SampleProvider, Version=1.0.0.0, Culture=neutral, PublicKeyToken=cde85af3c5f6411e
ProviderSettings         : {"DBConnection"="Database=Test123;Server=..**.**;uid=****;pwd=*****;connection timeout=5000"}