How to check out and check in a document outside Alfresco using the REST API? - web-services

I have created a web application using servlets and JSP, and through it I have connected to the Alfresco repository. I am also able to upload documents to Alfresco and view them in the external web application.
Now my requirement is to offer check-in and check-out options for those documents.
I found the REST APIs below for this purpose.
But I do not understand how to use these APIs from servlets to fulfill my requirement.
POST /alfresco/service/slingshot/doclib/action/cancel-checkout/site/{site}/{container}/{path}
POST /alfresco/service/slingshot/doclib/action/cancel-checkout/node/{store_type}/{store_id}/{id}
Can anyone please provide simple steps or a piece of code for this task?
Thanks in advance.

Please do not use the internal slingshot URLs for this. Instead, use OpenCMIS from Apache Chemistry. It will save you a lot of time and headaches, and the code is portable to repositories other than Alfresco.
The example below grabs an existing document by path, performs a checkout, then checks in a new major version of the plain text document.
package com.someco.cmis.examples;

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.chemistry.opencmis.client.api.Document;
import org.apache.chemistry.opencmis.client.api.ObjectId;
import org.apache.chemistry.opencmis.client.api.Repository;
import org.apache.chemistry.opencmis.client.api.Session;
import org.apache.chemistry.opencmis.client.api.SessionFactory;
import org.apache.chemistry.opencmis.client.runtime.SessionFactoryImpl;
import org.apache.chemistry.opencmis.commons.SessionParameter;
import org.apache.chemistry.opencmis.commons.data.ContentStream;
import org.apache.chemistry.opencmis.commons.enums.BindingType;

public class CheckoutCheckinExample {

    private String serviceUrl = "http://localhost:8080/alfresco/api/-default-/public/cmis/versions/1.1/atom"; // AtomPub binding URL
    private Session session = null;

    public static void main(String[] args) {
        CheckoutCheckinExample cce = new CheckoutCheckinExample();
        cce.doExample();
    }

    public void doExample() {
        Document doc = (Document) getSession().getObjectByPath("/test/test-plain-1.txt");
        String fileName = doc.getName();

        ObjectId pwcId = doc.checkOut(); // Check out the document
        Document pwc = (Document) getSession().getObject(pwcId); // Get the private working copy

        // Set up an updated content stream
        String docText = "This is a new major version.";
        byte[] content = docText.getBytes();
        InputStream stream = new ByteArrayInputStream(content);
        ContentStream contentStream = getSession().getObjectFactory()
                .createContentStream(fileName, Long.valueOf(content.length), "text/plain", stream);

        // Check in the working copy as a major version with a comment
        ObjectId updatedId = pwc.checkIn(true, null, contentStream, "My new version comment");
        doc = (Document) getSession().getObject(updatedId);
        System.out.println("Doc is now version: " + doc.getProperty("cmis:versionLabel").getValueAsString());
    }

    public Session getSession() {
        if (session == null) {
            // Default factory implementation
            SessionFactory factory = SessionFactoryImpl.newInstance();
            Map<String, String> parameter = new HashMap<String, String>();

            // User credentials
            parameter.put(SessionParameter.USER, "admin"); // <-- Replace
            parameter.put(SessionParameter.PASSWORD, "admin"); // <-- Replace

            // Connection settings: AtomPub binding
            parameter.put(SessionParameter.ATOMPUB_URL, this.serviceUrl);
            parameter.put(SessionParameter.BINDING_TYPE, BindingType.ATOMPUB.value());

            List<Repository> repositories = factory.getRepositories(parameter);
            this.session = repositories.get(0).createSession();
        }
        return this.session;
    }
}
Note that on the version of Alfresco I tested with (5.1.e) the document must already have the versionable aspect applied for the version label to be incremented; otherwise the check-in simply overwrites the original.
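If you cannot guarantee the aspect is present, a CMIS 1.1 session can add it before the checkout through secondary types. The snippet below is a minimal sketch, not part of the original example; it assumes Alfresco's standard mapping of aspects to CMIS 1.1 secondary types, under which cm:versionable appears as P:cm:versionable.
// Merge cm:versionable into the document's existing secondary types;
// assigning cmis:secondaryObjectTypeIds replaces the whole list.
List<String> secondaryTypeIds = new ArrayList<String>();
if (doc.getSecondaryTypes() != null) {
    for (SecondaryType type : doc.getSecondaryTypes()) {
        secondaryTypeIds.add(type.getId());
    }
}
secondaryTypeIds.add("P:cm:versionable");

Map<String, Object> props = new HashMap<String, Object>();
props.put("cmis:secondaryObjectTypeIds", secondaryTypeIds);
doc = (Document) doc.updateProperties(props); // now checkOut() will version properly
(SecondaryType is org.apache.chemistry.opencmis.client.api.SecondaryType; add java.util.ArrayList and java.util.List to the imports above.)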

Related

How to write Elastic unit tests to test query building

I want to write unit tests that exercise the Elasticsearch query building. I want to test that certain param values produce certain queries.
I started looking into ESTestCase. I see that you can mock a client using ESTestCase. I don't really need to mock the ES node; I just need to reproduce the query-building part, but that requires the client.
Has anybody dealt with such an issue?
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESTestCase;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;

import com.google.common.collect.Lists;

public class SearchRequestBuilderTests extends ESTestCase {

    private static Client client;

    @BeforeClass
    public static void initClient() {
        // This client will not be hit by any request, but it needs to be a non-null, proper client;
        // that is why we create it but don't add any transport address to it.
        Settings settings = Settings.builder()
                .put("path.home", createTempDir().toString()) // the setting key was blank in the original; "path.home" is what the ES test infrastructure expects here (assumption)
                .build();
        client = TransportClient.builder().settings(settings).build();
    }

    @AfterClass
    public static void closeClient() {
        client.close();
        client = null;
    }

    public static Map<String, String> createSampleSearchParams() {
        Map<String, String> searchParams = new HashMap<>();
        searchParams.put(SenseneConstants.ADC_PARAM, "US");
        searchParams.put(SenseneConstants.FETCH_SIZE_QUERY_PARAM, "10");
        searchParams.put(SenseneConstants.QUERY_PARAM, "some query");
        searchParams.put(SenseneConstants.LOCATION_QUERY_PARAM, "");
        searchParams.put(SenseneConstants.RADIUS_QUERY_PARAM, "20");
        searchParams.put(SenseneConstants.DISTANCE_UNIT_PARAM, DistanceUnit.MILES.name());
        searchParams.put(SenseneConstants.GEO_DISTANCE_PARAM, "true");
        return searchParams;
    }

    @Test
    public void test() {
        BasicSearcher searcher = new BasicSearcher(client); // this is my application's searcher
        Map<String, String> searchParams = createSampleSearchParams();
        ArrayList<String> filterQueries = Lists.newArrayList();
        SearchRequest searchRequest = SearchRequest.create(searchParams, filterQueries); // my application's request wrapper, not the ES SearchRequest
        MySearchRequestBuilder medleyReqBuilder = new MySearchRequestBuilder.Builder(client, "my_index", searchRequest).build();
        SearchRequestBuilder searchRequestBuilder = medleyReqBuilder.constructSearchRequestBuilder();
        System.out.print(searchRequestBuilder.toString());
        // Here I want to assert that the search request builder output is what it should be for the above client params
    }
}
I get this, and nothing in the code runs:
Assertions mismatch: -ea was not specified but -Dtests.asserts=true
REPRODUCE WITH: mvn test -Pdev -Dtests.seed=5F09BEDD71BBD14E -Dtests.class=SearchRequestBuilderTests -Dtests.locale=en_US -Dtests.timezone=America/Los_Angeles
NOTE: test params are: codec=null, sim=null, locale=null, timezone=(null)
NOTE: Mac OS X 10.10.5 x86_64/Oracle Corporation 1.7.0_80 (64-bit)/cpus=4,threads=1,free=122894936,total=128974848
NOTE: All tests run in this JVM: [SearchRequestBuilderTests]
Obviously a bit late but...
So this actually has nothing to do with the ES testing framework but rather with your run settings. Assuming you are running this in Eclipse, this is a duplicate of Assertions mismatch: -ea was not specified but -Dtests.asserts=true.
Either enable assertions globally: Eclipse Preferences -> JUnit -> enable the "Add '-ea' to VM arguments" checkbox.
Or per launch: right-click the project -> Run As -> Run Configurations -> Arguments tab -> add the -ea option to the VM arguments.

FOP image not found error

I'm trying to use an external graphic with XSLT for PDF generation. Most images are working fine, but every now and again one is 'not found' despite being viewable in a web browser. Here's the error that FOP spits out:
11:29:15.653 [main] ERROR org.apache.fop.apps.FOUserAgent - Image not found. URI: http://memesvault.com/wp-content/uploads/Derp-Meme-031.jpg. (No context info available)
And here's my external-graphic section:
<xsl:variable name="mediaUrl">
  <xsl:value-of select="mediaUrl" />
</xsl:variable>

<fo:external-graphic src="url('{$mediaUrl}')"
                     height="200"
                     max-width="200"
                     content-width="scale-to-fit" />
Any idea what I'm doing wrong?
Edit: it looks like this problem is related to a server not permitting access for the automated request. Is there a way to set the user agent's URIResolver in FOP 2.1? It appears that this functionality existed in prior versions, but I can't seem to find a way to do it with 2.1.
This happened because, as lfurini suggested, the server was blocking the request based on the user agent. One can work around this by plugging a custom URIResolver into FOP:
URIResolverAdapter uriResolverAdapter = new URIResolverAdapter(new UserAgentUriResolver());
FopFactoryBuilder builder = new FopFactoryBuilder(URI.create("/"), uriResolverAdapter);
FopFactory fopFactory = builder.build();
And here's a very simple URIResolver that adds the User-Agent header:
import javax.xml.transform.Source;
import javax.xml.transform.TransformerException;
import javax.xml.transform.URIResolver;
import javax.xml.transform.stream.StreamSource;
import java.io.IOException;
import java.net.URL;
import java.net.URLConnection;

public class UserAgentUriResolver implements URIResolver {

    private static final String USER_AGENT = "whatever";

    @Override
    public Source resolve(String href, String base) throws TransformerException {
        try {
            URL url = new URL(href);
            URLConnection connection = url.openConnection();
            connection.setRequestProperty("User-Agent", USER_AGENT);
            return new StreamSource(connection.getInputStream());
        } catch (IOException e) {
            // Propagate as the declared checked exception rather than an unchecked one
            throw new TransformerException(e);
        }
    }
}
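For completeness, here is a minimal sketch of driving a transformation with the factory built above. The class and file names are placeholders, but the wiring (FOUserAgent, Fop, SAX handler) is the standard FOP 2.x API:
import java.io.File;
import java.io.OutputStream;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.sax.SAXResult;
import javax.xml.transform.stream.StreamSource;
import org.apache.fop.apps.FOUserAgent;
import org.apache.fop.apps.Fop;
import org.apache.fop.apps.FopFactory;
import org.apache.fop.apps.MimeConstants;

public class PdfRenderer {
    // fopFactory is the factory created with the custom resolver above
    public static void render(FopFactory fopFactory, File xslt, File xml, OutputStream out) throws Exception {
        FOUserAgent userAgent = fopFactory.newFOUserAgent();
        Fop fop = fopFactory.newFop(MimeConstants.MIME_PDF, userAgent, out);

        // Run the XSLT; FOP consumes the resulting XSL-FO through its SAX handler,
        // and external-graphic URIs are fetched through the custom resolver
        Transformer transformer = TransformerFactory.newInstance().newTransformer(new StreamSource(xslt));
        transformer.transform(new StreamSource(xml), new SAXResult(fop.getDefaultHandler()));
    }
}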

Play WS standalone for 2.5.x

I want to create a Play web service client outside a Play application. For Play WS version 2.4.x it is easy to find that it is done like this:
val config = new NingAsyncHttpClientConfigBuilder().build()
val builder = new AsyncHttpClientConfig.Builder(config)
val client = new NingWSClient(builder.build)
However in 2.5.x the NingWSClient is now deprecated - instead the AhcWSClient should be used.
Unfortunately, I didn't find a complete example that explains the creation and usage of an AhcWSClient outside of Play. Currently I go with this:
import play.api.libs.ws.ahc.AhcWSClient
import akka.stream.ActorMaterializer
import akka.actor.ActorSystem

implicit val system = ActorSystem()
implicit val materializer = ActorMaterializer()

val ws = AhcWSClient()
val req = ws.url("http://example.com").get().map { resp =>
  resp.body
}(system.dispatcher)
Is this the correct way of creating an AhcWSClient? And is there a way of creating an AhcWSClient without an ActorSystem?
You are probably using compile-time dependency injection, otherwise you would just use @Inject() (ws: WSClient), right?
There is one example in the docs: https://www.playframework.com/documentation/2.5.x/ScalaWS#using-wsclient
So you could write something like this in your application loader:
lazy val ws = {
  import com.typesafe.config.ConfigFactory
  import play.api._
  import play.api.libs.ws._
  import play.api.libs.ws.ahc.{AhcWSClient, AhcWSClientConfig}
  import play.api.libs.ws.ahc.AhcConfigBuilder
  import org.asynchttpclient.AsyncHttpClientConfig

  val configuration = Configuration.reference ++ Configuration(ConfigFactory.parseString(
    """
      |ws.followRedirects = true
    """.stripMargin))

  val parser = new WSConfigParser(configuration, environment)
  val config = new AhcWSClientConfig(wsClientConfig = parser.parse())
  val builder = new AhcConfigBuilder(config)
  val logging = new AsyncHttpClientConfig.AdditionalChannelInitializer() {
    override def initChannel(channel: io.netty.channel.Channel): Unit = {
      channel.pipeline.addFirst("log", new io.netty.handler.logging.LoggingHandler("debug"))
    }
  }
  val ahcBuilder = builder.configure()
  ahcBuilder.setHttpAdditionalChannelInitializer(logging)
  val ahcConfig = ahcBuilder.build()
  new AhcWSClient(ahcConfig)
}
applicationLifecycle.addStopHook(() => Future.successful(ws.close))
And then inject ws into your controllers. I'm not 100% sure about this approach; I would be happy if some Play guru could validate it.
Regarding an ActorSystem, you need it only to get a thread pool for resolving that Future. You can also just import or inject the default execution context:
play.api.libs.concurrent.Execution.Implicits.defaultContext.
Or you can use your own:
implicit val wsContext: ExecutionContext = actorSystem.dispatchers.lookup("contexts.your-special-ws-config").
AFAIK this is the proper way to create the AhcWSClient - at least in 2.5.0 and 2.5.1 - as seen in the Scala API
You can, of course, always take another HTTP client - there are many available for Scala - like Newman, Spray client, etc. (although Spray is also based on Akka so you would have to create an actor system as well)

How to force an Apache Mahout application to read directly from HDFS

I have implemented an Apache Mahout application (attached below) which does some basic computations. To do so, it needs to load the dataset from my local machine. The application is packaged as a jar file and executed within a Hadoop pseudo-distributed cluster. The terminal command for that is: $ hadoop jar /home/eualin/ApacheMahout/tdunning-MiA-5b8956f/target/mia-0.1-jar-with-dependencies.jar mia.recommender.ch03.IREvaluatorBooleanPrefIntro2 "/home/eualin/Desktop/links-final"
Now, my question is how to do the same, but this time reading the dataset from HDFS (assuming, of course, that the dataset is already stored in HDFS, e.g. in /user/eualin/output/links-final). What should change in that case? This might help: hdfs://localhost:50010/user/eualin/output/links-final
package mia.recommender.ch03;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.mahout.cf.taste.common.TasteException;
import org.apache.mahout.cf.taste.eval.DataModelBuilder;
import org.apache.mahout.cf.taste.eval.IRStatistics;
import org.apache.mahout.cf.taste.eval.RecommenderBuilder;
import org.apache.mahout.cf.taste.eval.RecommenderIRStatsEvaluator;
import org.apache.mahout.cf.taste.impl.common.FastByIDMap;
import org.apache.mahout.cf.taste.impl.eval.GenericRecommenderIRStatsEvaluator;
import org.apache.mahout.cf.taste.impl.model.GenericBooleanPrefDataModel;
import org.apache.mahout.cf.taste.impl.model.file.FileDataModel;
import org.apache.mahout.cf.taste.impl.neighborhood.NearestNUserNeighborhood;
import org.apache.mahout.cf.taste.impl.recommender.GenericBooleanPrefUserBasedRecommender;
import org.apache.mahout.cf.taste.impl.similarity.LogLikelihoodSimilarity;
import org.apache.mahout.cf.taste.model.DataModel;
import org.apache.mahout.cf.taste.model.PreferenceArray;
import org.apache.mahout.cf.taste.neighborhood.UserNeighborhood;
import org.apache.mahout.cf.taste.recommender.Recommender;
import org.apache.mahout.cf.taste.similarity.UserSimilarity;
import java.io.File;

public class IREvaluatorBooleanPrefIntro2 {

    private IREvaluatorBooleanPrefIntro2() {
    }

    public static void main(String[] args) throws Exception {
        if (args.length != 1) {
            System.out.println("give file's HDFS path");
            System.exit(1);
        }
        DataModel model = new GenericBooleanPrefDataModel(
                GenericBooleanPrefDataModel.toDataMap(
                        new GenericBooleanPrefDataModel(new FileDataModel(new File(args[0])))));
        RecommenderIRStatsEvaluator evaluator =
                new GenericRecommenderIRStatsEvaluator();
        RecommenderBuilder recommenderBuilder = new RecommenderBuilder() {
            @Override
            public Recommender buildRecommender(DataModel model) throws TasteException {
                UserSimilarity similarity = new LogLikelihoodSimilarity(model);
                UserNeighborhood neighborhood =
                        new NearestNUserNeighborhood(10, similarity, model);
                return new GenericBooleanPrefUserBasedRecommender(model, neighborhood, similarity);
            }
        };
        DataModelBuilder modelBuilder = new DataModelBuilder() {
            @Override
            public DataModel buildDataModel(FastByIDMap<PreferenceArray> trainingData) {
                return new GenericBooleanPrefDataModel(
                        GenericBooleanPrefDataModel.toDataMap(trainingData));
            }
        };
        IRStatistics stats = evaluator.evaluate(
                recommenderBuilder, modelBuilder, model, null, 10,
                GenericRecommenderIRStatsEvaluator.CHOOSE_THRESHOLD,
                1.0);
        System.out.println(stats.getPrecision());
        System.out.println(stats.getRecall());
    }
}
You can't, directly, since the non-distributed code has no knowledge of HDFS. Instead, copy the file from HDFS to a local location during setup and then read it from the local file system.
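A minimal sketch of that copy step, using the Hadoop FileSystem API already imported in the code above. Two assumptions to flag: the NameNode URI (port 50010 in the question is the DataNode transfer port; the file system itself usually listens on 8020 or 9000, so check fs.default.name in core-site.xml), and the reuse of the question's example path:
import java.io.File;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsToLocal {
    public static File fetch() throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf); // adjust to your NameNode
        File localCopy = File.createTempFile("links-final", ".dat");
        localCopy.delete(); // let Hadoop create the file itself
        // Pull the HDFS file down to the local file system
        fs.copyToLocalFile(new Path("/user/eualin/output/links-final"), new Path(localCopy.getAbsolutePath()));
        return localCopy;
    }
}
The returned file can then be handed to new FileDataModel(...) exactly as in the existing code.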

Testing Solr via Embedded Server

I'm coding some tests for my solr-indexer application. Following testing best practices, I want the tests to be self-contained, loading just schema.xml and solrconfig.xml and creating a temporary data tree for the indexing/searching tests.
As the application is mostly written in Java, I'm using the SolrJ library, but I'm having problems (well, I'm lost in the universe of CoreContainer, CoreDescriptor, CoreConfig, SolrCore...).
Can anyone post some code to create an embedded server that loads the config and writes to a data dir passed as a parameter?
You can start with SolrExampleTests, which extends SolrExampleTestBase, which in turn extends AbstractSolrTestCase.
Also see this SampleTest.
Also take a look at this and this thread.
This is an example of a simple test case; solr is the directory that contains your Solr configuration files:
import java.io.IOException;

import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
import org.apache.solr.util.AbstractSolrTestCase;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.SolrParams;
import org.junit.Before;
import org.junit.Test;

import static org.junit.Assert.assertEquals;

public class SolrSearchConfigTest extends AbstractSolrTestCase {

    private SolrServer server;

    @Override
    public String getSchemaFile() {
        return "solr/conf/schema.xml";
    }

    @Override
    public String getSolrConfigFile() {
        return "solr/conf/solrconfig.xml";
    }

    @Before
    @Override
    public void setUp() throws Exception {
        super.setUp();
        server = new EmbeddedSolrServer(h.getCoreContainer(), h.getCore().getName());
    }

    @Test
    public void testThatNoResultsAreReturned() throws SolrServerException {
        SolrParams params = new SolrQuery("text that is not found");
        QueryResponse response = server.query(params);
        assertEquals(0L, response.getResults().getNumFound());
    }

    @Test
    public void testThatDocumentIsFound() throws SolrServerException, IOException {
        SolrInputDocument document = new SolrInputDocument();
        document.addField("id", "1");
        document.addField("name", "my name");
        server.add(document);
        server.commit();

        SolrParams params = new SolrQuery("name");
        QueryResponse response = server.query(params);
        assertEquals(1L, response.getResults().getNumFound());
        assertEquals("1", response.getResults().get(0).get("id"));
    }
}
See this blog post for more info: Solr Integration Tests
First you need to set your Solr home directory, which contains solr.xml and a conf folder with solrconfig.xml, schema.xml, etc.
After that you can use this simple, basic SolrJ code:
File solrHome = new File("Your/Solr/Home/Dir/");
File configFile = new File(solrHome, "solr.xml");
CoreContainer coreContainer = new CoreContainer(solrHome.toString(), configFile);
SolrServer solrServer = new EmbeddedSolrServer(coreContainer, "Your-Core-Name-in-solr.xml");

SolrQuery query = new SolrQuery("Your Solr Query");
QueryResponse rsp = solrServer.query(query);
SolrDocumentList docs = rsp.getResults();
Iterator<SolrDocument> i = docs.iterator();
while (i.hasNext()) {
    System.out.println(i.next().toString());
}
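One more detail worth noting: the embedded container holds on to the cores (and the index lock) until it is released, so shut it down when the work is done. A one-line sketch against the same 4.x-era API used above:
coreContainer.shutdown(); // releases all cores held by the container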
I hope this helps.