We are using WSO2 ESB 4.8.1.
By default, log4j.properties uses
log4j.appender.CARBON_LOGFILE=org.wso2.carbon.logging.appenders.CarbonDailyRollingFileAppender
I want this to be a size-based rolling file appender. As per the documentation at https://docs.wso2.com/display/Carbon420/Managing+Logs, the following should do the trick:
## Comment out the following:
###log4j.appender.CARBON_LOGFILE=org.wso2.carbon.logging.appenders.CarbonDailyRollingFileAppender
## Add the following:
log4j.appender.CARBON_LOGFILE=org.apache.log4j.RollingFileAppender
log4j.appender.CARBON_LOGFILE.MaxFileSize=10MB
log4j.appender.CARBON_LOGFILE.MaxBackupIndex=20
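For context, only the appender class and its two rolling properties change; the rest of the CARBON_LOGFILE block should stay as shipped. The surrounding lines in a stock log4j.properties look roughly like the following (values are from a typical Carbon 4.2 pack and may differ in yours; keep whatever is already there):
log4j.appender.CARBON_LOGFILE.File=${carbon.home}/repository/logs/${instance.log}/wso2carbon${instance.log}.log
log4j.appender.CARBON_LOGFILE.layout=org.wso2.carbon.utils.logging.TenantAwarePatternLayout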
But after these changes, the log rotates at 10 MB, yet only one backup file is maintained.
Is this a known issue in WSO2 ESB 4.8.1?
This was working cleanly in WSO2 ESB 4.9.0.
However, we do not have the option to upgrade to that since some of the other features that we need are broken there.
Finally, I simulated the log rotation behavior in a WSO2 task. Refer to https://docs.wso2.com/display/ESB481/Writing+Tasks+Sample to understand how to write a sample WSO2 task.
This is the code:
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.synapse.ManagedLifecycle;
import org.apache.synapse.core.SynapseEnvironment;
import org.apache.synapse.task.Task;
public class LogFileBackupTaskSample implements Task, ManagedLifecycle
{
private static final Log LOG = LogFactory.getLog(LogFileBackupTaskSample.class);
private SynapseEnvironment synapseEnvironment;
// By default, the program is assumed to run from the WSO2 Home Folder
public static final String DEFAULT_LOG_FILE_LOCATION ="repository/logs/";
public static final String WSO2_LOG_FILE_NAME ="wso2carbon.log";
// If a value is not provided for logFileSizeInMbStr, the log file is backed up once it reaches 100 MB
public static final int DEFAULT_LOG_FILE_SIZE_IN_MB =100;
// If a value is not provided for noOfBackupFiles, 20 files would be backed up.
public static final int DEFAULT_BACKUP_FILES_NUMBER =20;
public static final int MB_TO_BYTES = 1024 * 1024; // bytes per megabyte
private String logLocation;
private int logFileSizeInMb;
private String logFileSizeInMbStr;
private String noOfBackupFiles;
private int noOfBackupFilesInt;
public void execute()
{
FileChannel sourceChannel = null;
FileChannel outChannel = null;
if(logLocation==null || logLocation.trim().equals(""))
{
if(LOG.isInfoEnabled())
{
LOG.info("No LOG location provided. Therefore using the default location of "+DEFAULT_LOG_FILE_LOCATION);
}
logLocation= DEFAULT_LOG_FILE_LOCATION;
}
if(!logLocation.endsWith("/") && !logLocation.endsWith("\\"))
{
logLocation= logLocation+File.separator;
}
if(logFileSizeInMb==0)
{
if(LOG.isInfoEnabled())
{
LOG.info("No LOG file size in MB. Therefore using the default size of "+DEFAULT_LOG_FILE_SIZE_IN_MB+" Mb");
}
logFileSizeInMb= DEFAULT_LOG_FILE_SIZE_IN_MB * MB_TO_BYTES;
}
String inputLogFileNameWithDirectory= logLocation+WSO2_LOG_FILE_NAME;
File inputLogFileWithDirectory = new File(inputLogFileNameWithDirectory);
long currentLogSize=0;
boolean fileSwapped= false;
try
{
currentLogSize = inputLogFileWithDirectory.length();
if(currentLogSize> logFileSizeInMb)
{
long currentDateLong = System.currentTimeMillis();
Date date = new Date(currentDateLong);
String outFileName= WSO2_LOG_FILE_NAME+"."+date.toString().replace(' ', '_').replace(':', '_');
sourceChannel = new FileInputStream(inputLogFileWithDirectory).getChannel();
File outFile = new File (logLocation+outFileName);
outFile.createNewFile();
outChannel = new FileOutputStream(outFile).getChannel();
outChannel.transferFrom(sourceChannel, 0, currentLogSize);
fileSwapped= true;
}
}
catch(IOException e)
{
LOG.error(e.toString(),e);
throw new RuntimeException(e);
}
finally
{
if(sourceChannel!=null)
{
try
{
sourceChannel.close();
}
catch (IOException e) {
// Log and continue; nothing else to do if close fails
LOG.error(e.toString(),e);
}
}
if(outChannel!=null)
{
try
{
outChannel.close();
}
catch (IOException e) {
// Log and continue; nothing else to do if close fails
LOG.error(e.toString(),e);
}
}
}
FileChannel sourceTruncateChannel = null;
try
{
if(fileSwapped)
{
sourceTruncateChannel = new FileOutputStream(inputLogFileWithDirectory).getChannel();
sourceTruncateChannel.truncate(currentLogSize);
}
}
catch(IOException e)
{
LOG.error(e.toString(),e);
throw new RuntimeException(e);
}
finally
{
if(sourceTruncateChannel!=null)
{
try
{
sourceTruncateChannel.close();
}
catch (IOException e) {
// Log and continue; nothing else to do if close fails
LOG.error(e.toString(),e);
}
}
}
if(fileSwapped)
{
deletingOldFiles();
}
}
public void deletingOldFiles()
{
if(noOfBackupFilesInt==0)
{
if(LOG.isInfoEnabled())
{
LOG.info("NoOfBackupFiles 0. Thus using the default number of "+DEFAULT_BACKUP_FILES_NUMBER);
}
noOfBackupFilesInt= DEFAULT_BACKUP_FILES_NUMBER;
}
File[] listOfFiles = new File(logLocation).listFiles();
// listFiles() returns null if the directory is missing or unreadable
if (listOfFiles == null)
{
return;
}
List <TimeStampWiseFile> listOfWso2Files = new ArrayList<TimeStampWiseFile>();
for (int i = 0; i < listOfFiles.length; i++)
{
if(listOfFiles[i].getName().startsWith(WSO2_LOG_FILE_NAME) && !listOfFiles[i].getName().equals(WSO2_LOG_FILE_NAME))
{
listOfWso2Files.add(new TimeStampWiseFile(logLocation, listOfFiles[i].getName()));
}
}
// No files to delete in this case.
if(listOfWso2Files.size()<=noOfBackupFilesInt)
{
return;
}
TimeStampWiseFile[] listOfWSo2FilesArray = new TimeStampWiseFile[listOfWso2Files.size()];
listOfWSo2FilesArray= listOfWso2Files.toArray(listOfWSo2FilesArray);
// We need descending order so that the oldest files end up at the bottom.
Arrays.sort(listOfWSo2FilesArray, Collections.reverseOrder());
int index=0;
for (int i = 0; i < listOfWSo2FilesArray.length; i++)
{
TimeStampWiseFile timeStampWiseFile = listOfWSo2FilesArray[i];
if(++index > noOfBackupFilesInt)
{
String fileName = timeStampWiseFile.getName();
timeStampWiseFile.delete();
if(LOG.isInfoEnabled())
{
LOG.info("Removed File "+fileName);
}
}
}
}
public void destroy()
{
}
public void init(SynapseEnvironment synapseEnvironment)
{
this.synapseEnvironment = synapseEnvironment;
}
public String getLogLocation()
{
return logLocation;
}
public void setLogLocation(String logLocation)
{
this.logLocation = logLocation;
if(logLocation== null || logLocation.trim().equals(""))
{
if(LOG.isInfoEnabled())
{
LOG.info("No LOG location provided. Therefore using the default location of "+DEFAULT_LOG_FILE_LOCATION);
}
logLocation= DEFAULT_LOG_FILE_LOCATION;
}
}
public String getLogFileSizeInMbStr()
{
return logFileSizeInMbStr;
}
public void setLogFileSizeInMbStr(String logFileSizeInMbStr)
{
this.logFileSizeInMbStr = logFileSizeInMbStr;
if(logFileSizeInMbStr== null || logFileSizeInMbStr.trim().equals(""))
{
if(LOG.isInfoEnabled())
{
LOG.info("No LOG file size in MB. Therefore using the default size of "+DEFAULT_LOG_FILE_SIZE_IN_MB+" Mb");
}
logFileSizeInMb= DEFAULT_LOG_FILE_SIZE_IN_MB * MB_TO_BYTES;
}
else
{
try
{
logFileSizeInMb= Integer.parseInt(logFileSizeInMbStr) * MB_TO_BYTES;
}
catch(NumberFormatException e)
{
LOG.error("logFileSizeInMb is not proper. If the size is 20MB, provide 20 as the 2nd argument. Due to the exception"
+ "using the default size of "+DEFAULT_LOG_FILE_SIZE_IN_MB+" Mb");
logFileSizeInMb= DEFAULT_LOG_FILE_SIZE_IN_MB * MB_TO_BYTES;
}
}
}
public String getNoOfBackupFiles()
{
return noOfBackupFiles;
}
public void setNoOfBackupFiles(String noOfBackupFiles)
{
this.noOfBackupFiles = noOfBackupFiles;
try
{
noOfBackupFilesInt= Integer.parseInt(noOfBackupFiles) ;
}
catch(NumberFormatException e)
{
LOG.error("NoOfBackupFiles is not proper. Proper a proper integer value. Due to the exception"
+ "using the default number of "+DEFAULT_BACKUP_FILES_NUMBER);
noOfBackupFilesInt= DEFAULT_BACKUP_FILES_NUMBER;
}
}
}
Now add this as a scheduled task, only in the super tenant. Refer to https://docs.wso2.com/display/ESB480/Adding+and+Scheduling+Tasks for the details on this.
<?xml version="1.0" encoding="UTF-8"?>
<task xmlns="http://ws.apache.org/ns/synapse"
name="LogFileBackupTask"
class="LogFileBackupTask"
group="synapse.simple.quartz">
<trigger cron="0 0/1 * * * ?"/>
<property xmlns:task="http://www.wso2.org/products/wso2commons/tasks"
name="noOfBackupFiles"
value="20"/>
<property xmlns:task="http://www.wso2.org/products/wso2commons/tasks"
name="logFileSizeInMbStr"
value="20"/>
<property xmlns:task="http://www.wso2.org/products/wso2commons/tasks"
name="logLocation"
value="repository/logs/"/>
</task>
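Note that the trigger cron 0 0/1 * * * ? fires the task every minute, and each property element is injected through the matching bean setter on the task class (setNoOfBackupFiles, setLogFileSizeInMbStr, setLogLocation), which is why those setters accept Strings.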
I am working on a task to clear the memorystore cache if the input file to be processed by dataflow has data. That is, if the input file has no records, the memorystore won't be flushed; but if the input file has even one record, the memorystore should be flushed and then the input file should be processed.
My dataflow application is a multi-pipeline application which reads, processes, and then stores the data in the memorystore. The pipeline executes successfully, and the flushing of the memorystore works, but the insertion is not happening after the flush.
I have written a function that flushes the memorystore after checking if the input file has a record.
FlushingMemorystore.java
package com.click.example.functions;
import afu.org.checkerframework.checker.nullness.qual.Nullable;
import com.google.auto.value.AutoValue;
import org.apache.beam.sdk.io.redis.RedisConnectionConfiguration;
import org.apache.beam.sdk.transforms.DoFn;
import org.apache.beam.sdk.transforms.PTransform;
import org.apache.beam.sdk.transforms.ParDo;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.PDone;
import org.apache.beam.vendor.grpc.v1p26p0.com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Pipeline;
public class FlushingMemorystore {
private static final Logger LOGGER = LoggerFactory.getLogger(FlushingMemorystore.class);
public static FlushingMemorystore.Read read() {
return (new AutoValue_FlushingMemorystore_Read.Builder())
.setConnectionConfiguration(RedisConnectionConfiguration.create()).build();
}
@AutoValue
public abstract static class Read extends PTransform<PCollection<Long>, PDone> {
public Read() {
}
@Nullable
abstract RedisConnectionConfiguration connectionConfiguration();
@Nullable
abstract Long expireTime();
abstract FlushingMemorystore.Read.Builder toBuilder();
public FlushingMemorystore.Read withEndpoint(String host, int port) {
Preconditions.checkArgument(host != null, "host cannot be null");
Preconditions.checkArgument(port > 0, "port cannot be negative or 0");
return this.toBuilder().setConnectionConfiguration(this.connectionConfiguration().withHost(host).withPort(port)).build();
}
public FlushingMemorystore.Read withAuth(String auth) {
Preconditions.checkArgument(auth != null, "auth cannot be null");
return this.toBuilder().setConnectionConfiguration(this.connectionConfiguration().withAuth(auth)).build();
}
public FlushingMemorystore.Read withTimeout(int timeout) {
Preconditions.checkArgument(timeout >= 0, "timeout cannot be negative");
return this.toBuilder().setConnectionConfiguration(this.connectionConfiguration().withTimeout(timeout)).build();
}
public FlushingMemorystore.Read withConnectionConfiguration(RedisConnectionConfiguration connectionConfiguration) {
Preconditions.checkArgument(connectionConfiguration != null, "connection cannot be null");
return this.toBuilder().setConnectionConfiguration(connectionConfiguration).build();
}
public FlushingMemorystore.Read withExpireTime(Long expireTimeMillis) {
Preconditions.checkArgument(expireTimeMillis != null, "expireTimeMillis cannot be null");
Preconditions.checkArgument(expireTimeMillis > 0L, "expireTimeMillis cannot be negative or 0");
return this.toBuilder().setExpireTime(expireTimeMillis).build();
}
public PDone expand(PCollection<Long> input) {
Preconditions.checkArgument(this.connectionConfiguration() != null, "withConnectionConfiguration() is required");
input.apply(ParDo.of(new FlushingMemorystore.Read.ReadFn(this)));
return PDone.in(input.getPipeline());
}
private static class ReadFn extends DoFn<Long, String> {
private static final int DEFAULT_BATCH_SIZE = 1000;
private final FlushingMemorystore.Read spec;
private transient Jedis jedis;
private transient Pipeline pipeline;
private int batchCount;
public ReadFn(FlushingMemorystore.Read spec) {
this.spec = spec;
}
@Setup
public void setup() {
this.jedis = this.spec.connectionConfiguration().connect();
}
@StartBundle
public void startBundle() {
this.pipeline = this.jedis.pipelined();
this.pipeline.multi();
this.batchCount = 0;
}
@ProcessElement
public void processElement(DoFn<Long, String>.ProcessContext c) {
Long count = c.element();
batchCount++;
if(count==null && count < 0) {
LOGGER.info("No Records are there in the input file");
} else {
if (pipeline.isInMulti()) {
pipeline.exec();
pipeline.sync();
jedis.flushDB();
}
LOGGER.info("*****The memorystore is flushed*****");
}
}
@FinishBundle
public void finishBundle() {
if (this.pipeline.isInMulti()) {
this.pipeline.exec();
this.pipeline.sync();
}
this.batchCount=0;
}
@Teardown
public void teardown() {
this.jedis.close();
}
}
@AutoValue.Builder
abstract static class Builder {
Builder() {
}
abstract FlushingMemorystore.Read.Builder setExpireTime(Long expireTimeMillis);
abstract FlushingMemorystore.Read build();
abstract FlushingMemorystore.Read.Builder setConnectionConfiguration(RedisConnectionConfiguration connectionConfiguration);
}
}
}
I am using the function in my Starter Pipeline code.
Code snippet of starter pipeline where the function is being used:
StorageToRedisOptions options = PipelineOptionsFactory.fromArgs(args)
.withValidation()
.as(StorageToRedisOptions.class);
Pipeline p = Pipeline.create(options);
PCollection<String> lines = p.apply(
"ReadLines", TextIO.read().from(options.getInputFile()));
/**
* Flushing the Memorystore if there are records in the input file
*/
lines.apply("Checking Data in input file", Count.globally())
.apply("Flushing the data store", FlushingMemorystore.read()
.withConnectionConfiguration(RedisConnectionConfiguration
.create(options.getRedisHost(), options.getRedisPort())));
Code snippet for the processed data to be inserted after clearing the cache:
dataset.apply(SOME_DATASET_TRANSFORMATION, RedisIO.write()
.withMethod(RedisIO.Write.Method.SADD)
.withConnectionConfiguration(RedisConnectionConfiguration
.create(options.getRedisHost(), options.getRedisPort())));
The dataflow executes fine and it flushes the memorystore as well, but the insertion is not working after that. Could you please point out where I am going wrong?
Any solution for resolving the issue is truly appreciated. Thanks in advance!
Edit:
Providing additional information as requested in the comments
The runtime used is Java 11, and it is using the Apache Beam SDK for Java 2.24.0.
If the input file has records, it will process the data with some logic. For example, if the input file has data like:
abcabc|Bruce|Wayne|2000
abbabb|Tony|Stark|3423
The dataflow will count the number of records, which is 2 in this case, and will process the id, first name, etc. according to the logic, and then it stores them in the memorystore. This input file will be coming every day, hence the memorystore should be cleared (or flushed) if the input file has records.
Although the pipeline is not breaking, I think I am missing something.
I suspect the problem here is that you need to ensure the "Flush" step runs (and completes) before the RedisIO.write step happens. Beam has a Wait.on transform that you can use for this.
To accomplish this, we can use the output from the flushing PTransform as a signal that we've flushed the database - and we only write to the database after we are done flushing. The process call for your flushing DoFn would look like this:
@ProcessElement
public void processElement(DoFn<Long, String>.ProcessContext c) {
Long count = c.element();
if(count==null && count < 0) {
LOGGER.info("No Records are there in the input file");
} else {
if (pipeline.isInMulti()) {
pipeline.exec();
pipeline.sync();
jedis.flushDB();
}
LOGGER.info("*****The memorystore is flushed*****");
}
c.output("READY");
}
Once we have a signal pointing that the database has been flushed, we can use it to wait before writing the new data to it:
Pipeline p = Pipeline.create(options);
PCollection<String> lines = p.apply(
"ReadLines", TextIO.read().from(options.getInputFile()));
/**
* Flushing the Memorystore if there are records in the input file
*/
PCollection<String> flushedSignal = lines
.apply("Checking Data in input file", Count.globally())
.apply("Flushing the data store", FlushingMemorystore.read()
.withConnectionConfiguration(RedisConnectionConfiguration
.create(options.getRedisHost(), options.getRedisPort())));
// Then we use the flushing signal to start writing to Redis:
dataset
.apply(Wait.on(flushedSignal))
.apply(SOME_DATASET_TRANSFORMATION, RedisIO.write()
.withMethod(RedisIO.Write.Method.SADD)
.withConnectionConfiguration(RedisConnectionConfiguration
.create(options.getRedisHost(), options.getRedisPort())));
The issue was resolved after I applied the Wait.on transform, as Pablo's answer already explains. However, I had to rewrite my FlushingMemorystore.java a bit so that it returns a PCollection for the flushedSignal flag.
Here's the function:
package com.click.example.functions;
import afu.org.checkerframework.checker.nullness.qual.Nullable;
import com.google.auto.value.AutoValue;
import org.apache.beam.sdk.io.redis.RedisConnectionConfiguration;
import org.apache.beam.sdk.transforms.DoFn;
import org.apache.beam.sdk.transforms.PTransform;
import org.apache.beam.sdk.transforms.ParDo;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.vendor.grpc.v1p26p0.com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Pipeline;
public class FlushingMemorystore extends DoFn<Long, String> {
private static final Logger LOGGER = LoggerFactory.getLogger(FlushingMemorystore.class);
public static FlushingMemorystore.Read read() {
return (new AutoValue_FlushingMemorystore_Read.Builder())
.setConnectionConfiguration(RedisConnectionConfiguration.create()).build();
}
@AutoValue
public abstract static class Read extends PTransform<PCollection<Long>, PCollection<String>> {
public Read() {
}
@Nullable
abstract RedisConnectionConfiguration connectionConfiguration();
@Nullable
abstract Long expireTime();
abstract FlushingMemorystore.Read.Builder toBuilder();
public FlushingMemorystore.Read withEndpoint(String host, int port) {
Preconditions.checkArgument(host != null, "host cannot be null");
Preconditions.checkArgument(port > 0, "port cannot be negative or 0");
return this.toBuilder().setConnectionConfiguration(this.connectionConfiguration().withHost(host).withPort(port)).build();
}
public FlushingMemorystore.Read withAuth(String auth) {
Preconditions.checkArgument(auth != null, "auth cannot be null");
return this.toBuilder().setConnectionConfiguration(this.connectionConfiguration().withAuth(auth)).build();
}
public FlushingMemorystore.Read withTimeout(int timeout) {
Preconditions.checkArgument(timeout >= 0, "timeout cannot be negative");
return this.toBuilder().setConnectionConfiguration(this.connectionConfiguration().withTimeout(timeout)).build();
}
public FlushingMemorystore.Read withConnectionConfiguration(RedisConnectionConfiguration connectionConfiguration) {
Preconditions.checkArgument(connectionConfiguration != null, "connection cannot be null");
return this.toBuilder().setConnectionConfiguration(connectionConfiguration).build();
}
public FlushingMemorystore.Read withExpireTime(Long expireTimeMillis) {
Preconditions.checkArgument(expireTimeMillis != null, "expireTimeMillis cannot be null");
Preconditions.checkArgument(expireTimeMillis > 0L, "expireTimeMillis cannot be negative or 0");
return this.toBuilder().setExpireTime(expireTimeMillis).build();
}
public PCollection<String> expand(PCollection<Long> input) {
Preconditions.checkArgument(this.connectionConfiguration() != null, "withConnectionConfiguration() is required");
return input.apply(ParDo.of(new FlushingMemorystore.Read.ReadFn(this)));
}
@Setup
public Jedis setup() {
return this.connectionConfiguration().connect();
}
private static class ReadFn extends DoFn<Long, String> {
private static final int DEFAULT_BATCH_SIZE = 1000;
private final FlushingMemorystore.Read spec;
private transient Jedis jedis;
private transient Pipeline pipeline;
private int batchCount;
public ReadFn(FlushingMemorystore.Read spec) {
this.spec = spec;
}
@Setup
public void setup() {
this.jedis = this.spec.connectionConfiguration().connect();
}
@StartBundle
public void startBundle() {
this.pipeline = this.jedis.pipelined();
this.pipeline.multi();
this.batchCount = 0;
}
@ProcessElement
public void processElement(@Element Long count, OutputReceiver<String> out) {
batchCount++;
if(count!=null && count > 0) {
if (pipeline.isInMulti()) {
pipeline.exec();
pipeline.sync();
jedis.flushDB();
LOGGER.info("*****The memorystore is flushed*****");
}
out.output("SUCCESS");
} else {
LOGGER.info("No Records are there in the input file");
out.output("FAILURE");
}
}
@FinishBundle
public void finishBundle() {
if (this.pipeline.isInMulti()) {
this.pipeline.exec();
this.pipeline.sync();
}
this.batchCount=0;
}
@Teardown
public void teardown() {
this.jedis.close();
}
}
@AutoValue.Builder
abstract static class Builder {
Builder() {
}
abstract FlushingMemorystore.Read.Builder setExpireTime(Long expireTimeMillis);
abstract FlushingMemorystore.Read build();
abstract FlushingMemorystore.Read.Builder setConnectionConfiguration(RedisConnectionConfiguration connectionConfiguration);
}
}
}
I am new to Google Cloud and to the Pub/Sub service.
I have implemented a simple CustomAttributeReceiver along the lines of the sample code below:
https://github.com/googleapis/java-pubsub/blob/master/samples/snippets/src/main/java/pubsub/PublishWithCustomAttributesExample.java
However, I am getting the below exception:
com.google.gson.JsonSyntaxException: java.lang.IllegalStateException: Expected STRING but was BEGIN_OBJECT at path $
at com.google.gson.Gson.fromJson(Gson.java:944)
at com.google.gson.Gson.fromJson(Gson.java:1003)
at com.google.cloud.functions.invoker.NewBackgroundFunctionExecutor$TypedFunctionExecutor.serviceLegacyEvent(NewBackgroundFunctionExecutor.java:257)
at com.google.cloud.functions.invoker.NewBackgroundFunctionExecutor.serviceLegacyEvent(NewBackgroundFunctionExecutor.java:343)
Can someone shed light on what I am missing here?
Publisher side
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import com.google.api.core.ApiFuture;
import com.google.cloud.pubsub.v1.Publisher;
import com.google.protobuf.ByteString;
import com.google.pubsub.v1.PubsubMessage;
import com.google.pubsub.v1.TopicName;
import PubSubMessage;
TopicName topicName = TopicName.of(projectId, topicId);
System.out.println("informListenersAboutSucessfulRegisteration=" + topicName);
Publisher publisher = null;
try {
publisher = Publisher.newBuilder(topicName).build();
PubSubMessage newUserRegisterMsg = new PubSubMessage();
Map<String, String> attributes = new HashMap<String, String>();
attributes.put(PubSubMessage.newUserLanguage, newUserLanguage);
newUserRegisterMsg.setAttributes(attributes);
ByteString data = ByteString.copyFromUtf8("NewUserRegisteration");
PubsubMessage pubsubMessage = PubsubMessage.newBuilder().setData(data).putAllAttributes(attributes).build();
ApiFuture<String> messageIdFuture = publisher.publish(pubsubMessage);
String messageId = messageIdFuture.get();
System.out.println("Published message ID: " + messageId);
} catch (Exception e) {
Logger.getLogger(PubSubMessenger.name).log(Level.SEVERE, "Exception while publishing message", e);
} finally {
if (publisher != null) {
publisher.shutdown();
publisher.awaitTermination(1, TimeUnit.MINUTES);
}
}
Receiver side
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import com.google.cloud.functions.BackgroundFunction;
import com.google.cloud.functions.Context;
import PubSubMessage;
public class SendEmailFromSendGrid implements BackgroundFunction<PubSubMessage> {
public SendEmailFromSendGrid() {
}
public void accept(PubSubMessage message, Context context) throws Exception {
System.out.println("invoked accept");
String name = "World";
if (message != null && message.getData() != null) {
name = new String(Base64.getDecoder().decode(message.getData().getBytes(StandardCharsets.UTF_8)),
StandardCharsets.UTF_8);
}
System.out.println(String.format("Hello %s!", name));
return;
}
}
PubSubMessage Definition
import java.util.Map;
public class PubSubMessage {
public static final String newUserLanguage = "userLanguage";
private String data;
private Map<String, String> attributes;
private String messageId;
private String publishTime;
public String getData() {
return data;
}
public void setData(String data) {
this.data = data;
}
public Map<String, String> getAttributes() {
return attributes;
}
public void setAttributes(Map<String, String> attributes) {
this.attributes = attributes;
}
public String getMessageId() {
return messageId;
}
public void setMessageId(String messageId) {
this.messageId = messageId;
}
public String getPublishTime() {
return publishTime;
}
public void setPublishTime(String publishTime) {
this.publishTime = publishTime;
}
}
Thank you!
This answer was provided by @user1241724 in the comment section:
Redid the whole exercise and it is working now. Only difference is
added default constructor in PubSubMessage.
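For reference, a minimal sketch of that change: Gson instantiates the class reflectively when deserializing the event, so an explicit no-arg constructor must be present (it is required once any other constructor is declared, because the implicit default constructor then disappears).
import java.util.Map;
public class PubSubMessage {
private String data;
private Map<String, String> attributes;
private String messageId;
private String publishTime;
// Explicit no-arg constructor so Gson can instantiate the class reflectively
public PubSubMessage() {
}
// getters and setters as in the definition above
}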
I'm trying to create and test an API endpoint using AWS Lambda and API Gateway. I can test my function successfully using Lambda Test, but when I try to test my endpoint it gives:
{
"message": "Internal server error"
}
This is my handler class:
package com.amazonaws.lambda.gandhi.conversion.api;
import java.io.IOException;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.security.spec.InvalidKeySpecException;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.lang3.RandomStringUtils;
import com.amazonaws.lambda.gandhi.conversion.api.Response.AuthClientCredentialResponse;
import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.RequestHandler;
import com.amazonaws.lambda.gandhi.conversion.api.utils.ClientAuthPOJO;
public class AuthClientCredentialServiceHandler implements RequestHandler<ClientAuthPOJO, Object> {
private AuthClientCredentialResponse authClientCredentialResponse;
private static final SecureRandom RANDOM = new SecureRandom();
public static int MAX_CLIENT_KEY = 10;
public static int CLIENT_SECRET_LENGTH = 69;
@Override
public AuthClientCredentialResponse handleRequest(ClientAuthPOJO clientIdSecret, Context context) {
String clientSecret;
try {
context.getLogger().log("Input: "
+ clientIdSecret);
String clientId = clientIdSecret.getClientId();
clientSecret = generateClientSecretKey();
Map<String, String> clientCredsMap = getClientCredentials();
if (clientCredsMap.size() > MAX_CLIENT_KEY) {
throw new RuntimeException(String.format("Max limit is %d, Please delete some keys", MAX_CLIENT_KEY));
}
clientCredsMap.forEach((k, v) -> {
if (clientId.equals(k)) {
throw new RuntimeException("Client Already exists");
}
});
storeClientCredentials(clientId, clientSecret);
AuthClientCredentialResponse authClientCredentialResponse = AuthClientCredentialResponse.builder().success(
true).clientId(clientId).clientSecret(clientSecret).build();
this.authClientCredentialResponse = authClientCredentialResponse;
} catch (Exception e) {
throw new RuntimeException(
"Failed to generate client secret: "
+ e.getMessage());
}
return authClientCredentialResponse;
}
private String generateClientSecretKey() throws NoSuchAlgorithmException, InvalidKeySpecException {
String clientSecret = RandomStringUtils.randomAlphanumeric(CLIENT_SECRET_LENGTH);
System.out.printf("clientSecret: %s%n", clientSecret);
return clientSecret;
}
private void storeClientCredentials(String clientId, String clientSecret) throws IOException {
/*
* TODO:
* Some logic to store clientCredentials to a file or DB. Decide later.
*/
System.out.println("temp ClientCredentials stored");
}
public Map<String, String> getClientCredentials() throws IOException {
/*
* TODO:
* Some logic to fetch clientCredentials from file or DB. Decide later.
*/
Map<String, String> clientCredMap = new HashMap<String, String>();
clientCredMap.put("1", "secretKey1");
clientCredMap.put("2", "secretKey2");
clientCredMap.put("3", "secretKey3");
clientCredMap.put("4", "secretKey4");
return clientCredMap;
}
}
My input class:
package com.amazonaws.lambda.gandhi.conversion.api.utils;
public class ClientAuthPOJO {
String clientId;
String clientSecret;
public String getClientId() {
return clientId;
}
public void setClientId(String clientId) {
this.clientId = clientId;
}
public String getClientSecret() {
return clientSecret;
}
public void setClientSecret(String clientSecret) {
this.clientSecret = clientSecret;
}
public ClientAuthPOJO(String clientId, String clientSecret) {
super();
this.clientId = clientId;
this.clientSecret = clientSecret;
}
public ClientAuthPOJO() {
}
}
My test object in Lambda:
My endpoint test in API Gateway:
Can someone please help me figure out the problem in creating the function or API Gateway?
Edit:
When I checked the logs, I found that the parameters to the function (clientId and clientSecret) are null. So there seems to be some problem in the way I'm sending my request body.
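One common cause, assuming the method is set up without Lambda proxy integration: the raw JSON request body must mirror the POJO's field names for Lambda's built-in deserializer to populate ClientAuthPOJO, e.g. a test body like this (placeholder values):
{
"clientId": "test-client",
"clientSecret": "test-secret"
}
With Lambda proxy integration enabled, on the other hand, the function receives the entire API Gateway event as its payload, so a plain POJO like ClientAuthPOJO ends up with null fields.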
I have Interceptors working on the application server. I have an annotated EJB:
@Stateless
@Named("accountsEJB")
public class AccountsEJB {
@PersistenceContext(unitName = "weducationPU")
private EntityManager em;
// . . . other methods
@WithLog
@Restricted(allowedRoles = {}) // Allowed only for admin
public Account save(Account item) {
if (item.getId() == 0) {
em.persist(item);
return item;
} else {
return em.merge(item);
}
}
@WithLog
@Restricted(allowedRoles = {}) // Allowed only for admin
public void delete(final Account item) {
Account a = em.find(Account.class, item.getId());
if (null != a) {
em.remove(a);
}
}
}
An empty list of roles means that it is allowed only for the admin role.
Here is the unit test for this EJB:
public class AccountsEJBTest {
private static EJBContainer container;
private static AccountsEJB ejb;
@BeforeClass
public static void setUpClass() {
try {
Map<String, Object> properties = new HashMap<>();
properties.put(EJBContainer.MODULES, new File("target/classes"));
properties.put("org.glassfish.ejb.embedded.glassfish.installation.root", "glassfish");
properties.put(EJBContainer.APP_NAME, "weducation");
container = EJBContainer.createEJBContainer(properties);
ejb = (AccountsEJB) container.getContext().lookup("java:global/weducation/classes/AccountsEJB");
System.out.println("AccountsEJBTest running...");
} catch (NamingException e) {
fail("Container init error: " + e.getMessage());
}
}
@AfterClass
public static void tearDownClass() {
if (null != container) {
container.close();
}
System.out.println("AccountsEJBTest finished");
}
private boolean equals(Account source, Account result) {
if (!source.getFullName().contentEquals(result.getFullName())) return false;
if (!source.getLogin().contentEquals(result.getLogin())) return false;
return source.getRole() == result.getRole();
}
@Test
public void testOperations() {
try {
System.out.println("-->testOperations()");
Account testAccount = new Account();
testAccount.setFullName("Test Account");
testAccount.setLogin("test");
testAccount.setPassword("test");
testAccount.setConfirm("test");
testAccount.updatePassword();
testAccount.setRole(AccountRole.DEPOT);
Account savedAccount = ejb.save(testAccount);
assertTrue(equals(testAccount, savedAccount));
savedAccount.setFullName("Still Test Account");
savedAccount.setLogin("test1");
testAccount = ejb.save(savedAccount);
assertTrue(equals(testAccount, savedAccount));
testAccount.setPassword("testpwd");
testAccount.setConfirm("testpwd");
testAccount.updatePassword();
savedAccount = ejb.save(testAccount);
assertTrue(equals(testAccount, savedAccount));
ejb.delete(savedAccount);
} catch (Exception e) {
fail("Exception class " + e.getClass().getName() + " with message " + e.getMessage());
}
}
}
And this test passes. I think that is not correct, because there is no user with the admin role logged in. Why is this behavior happening?
UPDATED.
The @Restricted annotation:
@Inherited
@InterceptorBinding
@Target({METHOD, TYPE})
@Retention(RUNTIME)
public @interface Restricted {
@Nonbinding
AccountRole[] allowedRoles();
}
The SecurityInterceptor class:
@Interceptor
@Restricted(allowedRoles = {})
public class SecurityInterceptor implements Serializable {
@Inject
private transient SessionMB session;
@AroundInvoke
public Object checkSecurity(InvocationContext context) throws Exception {
//System.out.println("Security checker started.");
if ((session == null) || (session.getUser() == null)) {
throw new SecurityException("Can't get user info");
}
// Allow all to admin
if (session.isAdmin()) {
//System.out.println("It's admin.");
return context.proceed();
}
// walk non administrator roles
for (AccountRole r : getAllowedRoles(context.getMethod())) {
// if match - accept method invocation
if (session.getUser().getRole() == r) {
//System.out.println("It's " + r.getDescription());
return context.proceed();
}
}
throw new SecurityException(session.getUser().getFullName()
+ " has no such privileges");
}
private AccountRole[] getAllowedRoles(Method m) {
if (null == m) {
throw new IllegalArgumentException("Method is null!");
}
// Walk all method annotations
for (Annotation a : m.getAnnotations()) {
if (a instanceof Restricted) {
return ((Restricted) a).allowedRoles();
}
}
// Now - walk all class annotations
if (null != m.getDeclaringClass()) {
for (Annotation a : m.getDeclaringClass().getAnnotations()) {
if (a instanceof Restricted) {
return ((Restricted) a).allowedRoles();
}
}
}
// if no annotation found
throw new RuntimeException("Annotation @Restricted not found at method "
+ m.getName() + " or its class.");
}
}
The beans.xml is placed in the WEB-INF folder and looks like:
<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://xmlns.jcp.org/xml/ns/javaee"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://xmlns.jcp.org/xml/ns/javaee http://xmlns.jcp.org/xml/ns/javaee/beans_1_1.xsd"
bean-discovery-mode="annotated">
<interceptors>
<class>ru.edu.pgtk.weducation.interceptors.LogInterceptor</class>
<class>ru.edu.pgtk.weducation.interceptors.SecurityInterceptor</class>
</interceptors>
</beans>
Can someone help me to know:
How do I get interceptors working in unit tests?
How do I start an authorized session in unit tests (log in as admin, for example)?
How do I test operations such as creating and deleting an account with different tests (one test for creating, one for deleting)? Is it correct to test all operations in one test?
Thank you for your time and your questions.
I have created a web service using Spring-WS. When I send a request to the web service, this is the response I get in soap-ui:
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">
<SOAP-ENV:Header/>
<SOAP-ENV:Body>
<ns2:SendResponse xmlns:ns2="http://mycompany.com/schema/">
<ns2:SendResult>
<ns2:Token>A00179-02</ns2:Token>
</ns2:SendResult>
</ns2:SendResponse>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
Is there any way to get rid of the "ns2" namespace prefix from the response? I tried a couple of options:
1) Manually updated package-info.java to set the prefix to "":
@XmlSchema(namespace = "http://mycompany.com/schema/",
xmlns = {
@XmlNs(namespaceURI = "http://mycompany.com/schema/", prefix = "")
},
elementFormDefault = javax.xml.bind.annotation.XmlNsForm.QUALIFIED)
package com.example.foo.jaxb;
2) Set the prefix to "" in the QName object in the endpoint class:
return new JAXBElement<SendAndCommitResponse>(new QName("http://mycompany.com/schema/",
"SendResponse",""), SendResponse.class, response);
Neither worked. How do I get rid of the "ns2" namespace prefix?
I eventually found a solution for this.
My problem was caused by JDK 6 not shipping a full version of rt.jar (http://www.oracle.com/technetwork/java/javase/compatibility-137541.html).
I added the following to my Maven config:
<dependency>
<groupId>com.sun.xml.bind</groupId>
<artifactId>jaxb-impl</artifactId>
<version>2.2.6</version>
</dependency>
And then added
@XmlSchema(namespace = "http://mycompany.com/schema/",
elementFormDefault = javax.xml.bind.annotation.XmlNsForm.UNQUALIFIED)
in the package-info.java (as suggested by @acdcjunior above).
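Put together, the whole package-info.java would look something like this (package name taken from the earlier attempt; treat it as illustrative):
@javax.xml.bind.annotation.XmlSchema(
namespace = "http://mycompany.com/schema/",
elementFormDefault = javax.xml.bind.annotation.XmlNsForm.UNQUALIFIED)
package com.example.foo.jaxb;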
I tried a few of the approaches discussed here, but nothing worked...
The class below, from https://zhuanlan.zhihu.com/p/35298171, fixed my issue.
I added the below interceptor to remove the namespaces:
public class PayloadPrefixInterceptor extends TransformerHelper implements EndpointInterceptor {
public static final String NAMESPACE = ObjectFactory.class.getPackage().getAnnotation(XmlSchema.class).namespace();
public static final String XMLNS = "xmlns:";
@Override
public boolean handleRequest(MessageContext messageContext, Object endpoint) throws Exception {
return true;
}
@Override
public boolean handleResponse(MessageContext messageContext, Object endpoint) throws Exception {
WebServiceMessage response = messageContext.getResponse();
Source payloadSource = response.getPayloadSource();
DOMResult result = new DOMResult();
transform(payloadSource, result);
removePrefix(result.getNode());
transform(new DOMSource(result.getNode()), response.getPayloadResult());
return true;
}
private void removePrefix(Node node) {
if (node == null) {
return;
}
if (node.getNodeType() == Node.ELEMENT_NODE) {
removeNamespaceDeclaration(node);
}
if (node.getPrefix() != null) {
node.setPrefix(null);
}
NodeList childNodes = node.getChildNodes();
if (childNodes != null) {
IntStream.range(0, childNodes.getLength()).forEach(index -> removePrefix(childNodes.item(index)));
}
Node nextSibling = node.getNextSibling();
if (nextSibling != null) {
removePrefix(nextSibling);
}
}
private void removeNamespaceDeclaration(Node node) {
NamedNodeMap attributes = node.getAttributes();
IntStream.range(0, attributes.getLength()).forEach(index -> {
Node attribute = attributes.item(index);
if (StringUtils.startsWith(attribute.getNodeName(), XMLNS) &&
StringUtils.equals(attribute.getNodeValue(), NAMESPACE)) {
attributes.removeNamedItemNS(attribute.getNamespaceURI(), attribute.getLocalName());
}
});
}
@Override
public boolean handleFault(MessageContext messageContext, Object endpoint) throws Exception {
return true;
}
@Override
public void afterCompletion(MessageContext messageContext, Object endpoint, Exception ex) throws Exception {
}
}
I registered the interceptor as below:
@EnableWs
@Configuration
public class Config extends WsConfigurerAdapter {
@Override
public void addInterceptors(List<EndpointInterceptor> interceptors) {
interceptors.add(new PayloadPrefixInterceptor());
super.addInterceptors(interceptors);
}
}
It was hard.
First, create a class that intercepts SOAP requests and responses:
package examples.webservices.handler;
import java.lang.reflect.Method;
import java.util.Collections;
import java.util.Set;
import javax.xml.namespace.QName;
import javax.xml.soap.SOAPElement;
import javax.xml.soap.SOAPMessage;
import javax.xml.ws.handler.MessageContext;
import javax.xml.ws.handler.soap.SOAPHandler;
import javax.xml.ws.handler.soap.SOAPMessageContext;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
public class CorrigirConteudoRequisicaoSOAP implements SOAPHandler<SOAPMessageContext> {
public Set<QName> getHeaders() {
return Collections.emptySet();
}
public boolean handleMessage(SOAPMessageContext messageContext) {
this.corrigirConteudoRequisicaoSOAP(messageContext);
return true;
}
private void corrigirConteudoRequisicaoSOAP(SOAPMessageContext messageContext){
SOAPMessage msg = messageContext.getMessage();
try {
NodeList childNodes = msg.getSOAPBody().getChildNodes();
for(int k = 0; k < childNodes.getLength(); k++){
Node item = childNodes.item(k);
String localName = item.getLocalName();
{
item.setPrefix("");
Method m = SOAPElement.class.getDeclaredMethod("setElementQName", QName.class);
// I was forced to use reflection because the method setElementQName is not
// visible, nor is the class that implements it
m.invoke(item, new QName("", item.getLocalName()));
msg.saveChanges();
}
}
} catch (Exception e) {
try {
msg.writeTo(System.out);
} catch (Exception e1) {
e1.printStackTrace();
}
System.out.println();
}
}
public boolean handleFault(SOAPMessageContext messageContext) {
return true;
}
public void close(MessageContext messageContext) {
}
public static void main(String[] args)throws Exception {
}
}
Second, associate the service with the SOAP handler:
public class PortalFornecedor {
public Usuario getUsuario(){
XIF367Afae09A3344Fbf2E1De819D6EcbaService classeComNomeFeio = new XIF367Afae09A3344Fbf2E1De819D6EcbaService();
Usuario service = classeComNomeFeio.getHTTPPort();
BindingProvider bp = (BindingProvider)service;
Map<String, Object> requestContext = bp.getRequestContext();
requestContext.put(BindingProvider.USERNAME_PROPERTY, "user");
requestContext.put(BindingProvider.PASSWORD_PROPERTY, "pass");
this.handle(service);
return service;
}
public Object getDashboard(){
return "";
}
// here we associate the service with the SOAP handler
private BindingProvider handle(Usuario service) {
BindingProvider bp = (BindingProvider)service;
@SuppressWarnings("rawtypes")
List<Handler> chain = new ArrayList<Handler>();
chain.add(new CorrigirConteudoRequisicaoSOAP());
bp.getBinding().setHandlerChain(chain);
return bp;
}
public static void main(String[] args) {
PortalFornecedor pf = new PortalFornecedor();
Usuario usuario = pf.getUsuario();
LoginExecutarIN in = new LoginExecutarIN();
generated.Usuario user = new generated.Usuario();
user.setLogin("onias");
user.setSenha("12345");
user.setCodigoUsuario(0);
in.setParametroEntrada(user);
try {
LoginExecutarOUT out = usuario.loginExecutar(in);
// SOAPMessageContext.getMessage();
System.out.println(out.getRegistroSelecionado().getNome());
} catch (Exception e) {
e.printStackTrace();
}
}
}
Here is the simplest solution for this problem. Create a package-info.java file in your model package and add the below to it:
@javax.xml.bind.annotation.XmlSchema(namespace = "http://mycompany.com/schema",
elementFormDefault = javax.xml.bind.annotation.XmlNsForm.QUALIFIED,
xmlns = { @javax.xml.bind.annotation.XmlNs(namespaceURI = "http://mycompany.com/schema", prefix = "") })
package my.com.scicom.stars.model;
And add elementFormDefault as "qualified" in your XSD or WSDL file:
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns="http://mycompany.com/schema"
targetNamespace="http://mycompany.com/schema"
elementFormDefault="qualified">