CUDD sum of products Boolean expression

I would like to create a BDD for the following Boolean function:
F = (A'B'C'D') OR (A'B C) OR (C' D') OR (A)
I managed to create only F = (A'B'C'D') with the following code, but how do I add the other product terms to the existing BDD?
#include <stdio.h>
#include "cudd.h"

int main (int argc, char *argv[])
{
    char filename[30];
    DdManager *gbm; /* Global BDD manager. */
    gbm = Cudd_Init(0, 0, CUDD_UNIQUE_SLOTS, CUDD_CACHE_SLOTS, 0); /* Initialize a new BDD manager. */
    DdNode *bdd, *var, *tmp_neg, *tmp;
    int i;
    bdd = Cudd_ReadOne(gbm); /* Returns the logic one constant of the manager. */
    Cudd_Ref(bdd); /* Increases the reference count of a node. */
    for (i = 3; i >= 0; i--) {
        var = Cudd_bddIthVar(gbm, i); /* Get the i-th BDD variable, creating it if needed. */
        tmp_neg = Cudd_Not(var); /* Perform NOT Boolean operation. */
        tmp = Cudd_bddAnd(gbm, tmp_neg, bdd); /* Perform AND Boolean operation. */
        Cudd_Ref(tmp);
        Cudd_RecursiveDeref(gbm, bdd);
        bdd = tmp;
    }
    bdd = Cudd_BddToAdd(gbm, bdd); /* Convert the BDD to an ADD for printing/dumping. */
    print_dd(gbm, bdd, 2, 4);      /* print_dd and write_dd are my own helper functions. */
    sprintf(filename, "./bdd/graph.dot");
    write_dd(gbm, bdd, filename);
    Cudd_Quit(gbm);
    return 0;
}

Build every conjunction independently so that you get conj0 to conj3; make sure to negate only the correct literals. I'm not particularly versed in C and don't have a development environment set up right now, so you may need to make some corrections.
I will use the following mapping:
A <=> BDD(0)
B <=> BDD(1)
C <=> BDD(2)
D <=> BDD(3)
Build conj0 the way you do it now in your for loop. Make sure conj0 = bdd afterwards.
For conj1, which will encode (A' B C), use:
bdd = Cudd_Not(Cudd_bddIthVar(gbm, 0));               /* A'   */
Cudd_Ref(bdd);
tmp = Cudd_bddAnd(gbm, bdd, Cudd_bddIthVar(gbm, 1));  /* A'B  */
Cudd_Ref(tmp);
Cudd_RecursiveDeref(gbm, bdd);
bdd = tmp;
tmp = Cudd_bddAnd(gbm, bdd, Cudd_bddIthVar(gbm, 2));  /* A'BC */
Cudd_Ref(tmp);
Cudd_RecursiveDeref(gbm, bdd);
bdd = tmp;
conj1 = bdd;
Do the same for conj2 and conj3.
After you've got all the conjunctions, build the top-level disjunction using Cudd_bddOr().
Also make sure that you get the Cudd_Ref() and Cudd_RecursiveDeref() calls right, otherwise you'll leak memory.
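For example, a minimal sketch of that last step (assuming each of conj0 to conj3 has been built and Cudd_Ref'd as above):
DdNode *disj, *t;

disj = conj0;                        /* start with (A'B'C'D') */
t = Cudd_bddOr(gbm, disj, conj1);    /* OR in (A'B C)         */
Cudd_Ref(t);
Cudd_RecursiveDeref(gbm, disj);
Cudd_RecursiveDeref(gbm, conj1);
disj = t;
t = Cudd_bddOr(gbm, disj, conj2);    /* OR in (C'D')          */
Cudd_Ref(t);
Cudd_RecursiveDeref(gbm, disj);
Cudd_RecursiveDeref(gbm, conj2);
disj = t;
t = Cudd_bddOr(gbm, disj, conj3);    /* OR in (A)             */
Cudd_Ref(t);
Cudd_RecursiveDeref(gbm, disj);
Cudd_RecursiveDeref(gbm, conj3);
disj = t;                            /* disj now represents F */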

If you are only interested in that particular function, here's a way to build it and inspect it:
#include <stdio.h>
#include <stdlib.h>
#include "cudd.h"
int main(void) {
    /* Get set. */
    DdManager * mgr = Cudd_Init(4,0,CUDD_UNIQUE_SLOTS,CUDD_CACHE_SLOTS,0);
    DdNode *a = Cudd_bddIthVar(mgr, 0);
    DdNode *c = Cudd_bddIthVar(mgr, 1);
    DdNode *b = Cudd_bddIthVar(mgr, 2);
    DdNode *d = Cudd_bddIthVar(mgr, 3);
    char const * const inames[] = {"a", "c", "b", "d"};
    /* Build BDD. */
    DdNode * tmp = Cudd_bddIte(mgr, c, b, Cudd_Not(d));
    Cudd_Ref(tmp);
    DdNode * f = Cudd_bddOr(mgr, a, tmp);
    Cudd_Ref(f);
    Cudd_RecursiveDeref(mgr, tmp);
    /* Inspect it. */
    printf("f");
    Cudd_PrintSummary(mgr, f, 4, 0);
    Cudd_bddPrintCover(mgr, f, f);
    char * fform = Cudd_FactoredFormString(mgr, f, inames);
    printf("%s\n", fform);
    /* Break up camp and go home. */
    free(fform);
    Cudd_RecursiveDeref(mgr, f);
    int err = Cudd_CheckZeroRef(mgr);
    Cudd_Quit(mgr);
    return err;
}
Note the choice of (optimal) variable order. You should see this output:
f: 5 nodes 1 leaves 12 minterms
1--- 1
-11- 1
-0-0 1
a | (c & b | !c & !d)

Related

Memory Leak in Matlab when calling a C++ mexfile

Dear stackoverflow community,
I am currently working on a MEX function that itself calls external C++ code.
I am calling my MEX in a loop.
Here is my mexfile:
#include "mex.h"
#include "OOMP.h"
#include <iostream>
using namespace std;
/*
function [h, Di, beta, c, Q, NorQ] = OOMP_v4(f, D, tol,ind,No)
%OOMP Optimized Orthogonal Matching Pursuit
%
% It creates an atomic decomposition of a signal
% using OOMP criterion. You can choose a tolerance,
% the number of atoms to take in or an initial subspace to influence the OOMP algorithm.
%
%
% Usage: [h, Di, c, beta, Q, NorQ ] = OOMP_v2(f, D, tol,ind, No);
% [h, Di, c] = OOMP_v2(f, D, tol,ind, No);
*/
void mexFunction(int nlhs, mxArray *plhs[] , int nrhs, const mxArray *prhs[])
{
/* Begin input validation */
if(nrhs != 5)
{
mexErrMsgTxt("Wrong number of input arguments");
}
// Check we are being passed real full and non string variables
for ( int i = 0; i < nrhs; i++ )
{
if ( mxIsComplex(prhs[i]) || mxIsClass(prhs[i],"sparse") || mxIsChar(prhs[i]) || !mxIsDouble(prhs[i]) )
{
mexErrMsgTxt("Input must be double, real, full,and nonstring");
}
}
// First check f is a vector
if ((mxGetM(prhs[0]) !=1) || (mxGetN(prhs[0]) == 1))
{
mexErrMsgTxt("f should be a line vector");
}
// Validate D
if ( (mxGetM(prhs[1])) != (mxGetN(prhs[0])) )
{
mexPrintf("The atoms in the dictionary D are of length %i and should be of length %i\n",mxGetM(prhs[1]),mxGetN(prhs[0]));
mexErrMsgTxt("Error!!\n");
}
if ((mxGetM(prhs[1])) == 1)
{
mexErrMsgTxt("The dictionary D should contain more than 1 atom!!\n");
}
long L = mxGetM(prhs[1]);
long N = mxGetN(prhs[1]);
long numInd = mxGetN(prhs[3]);
Matrix *f = new Matrix(mxGetPr(prhs[0]),1,L, 'T');
Matrix *D = new Matrix(mxGetPr(prhs[1]),L,N,'T');
double tol = mxGetScalar(prhs[2]);
Matrix *mind = new Matrix(mxGetPr(prhs[3]),1,numInd);
long No = mxGetScalar(prhs[4]);
/* Begin Output validation */
if ( nlhs <3 || nlhs >6)
{
mexErrMsgTxt("Wrong number of output arguments");
}
Matrix *h = new Matrix(1,L,0);
Matrix *Di= new Matrix(1,0,0);
Matrix *beta= new Matrix(L,0,0);
Matrix *c= new Matrix(1,0,0);
Matrix *Q= new Matrix(L,0,0);
Matrix *NorQ= new Matrix((long)0,1,0);
int fail =OOMP_v4(f,D,tol,mind,No ,L, N,h,Di,beta,c,Q,NorQ);
if(!fail)
{
// Create the output variables
long K = (*Di).getNbre_C();
plhs[0] = mxCreateDoubleMatrix(1,L,mxREAL);
plhs[1] = mxCreateDoubleMatrix(1,K,mxREAL);
plhs[2] = mxCreateDoubleMatrix(1,K,mxREAL);
if(nlhs>=4)
plhs[3] = mxCreateDoubleMatrix(L,K,mxREAL);
if(nlhs>=5)
plhs[4] = mxCreateDoubleMatrix(L,K,mxREAL);
if(nlhs==6)
plhs[5] = mxCreateDoubleMatrix(K,1,mxREAL);
// Transformation
double *m_h= mxGetPr(plhs[0]);
double *m_Di = mxGetPr(plhs[1]);
double *m_c = mxGetPr(plhs[2]);
ToArray(h,m_h,1,L);
ToArray(Di,m_Di,1,K);
ToArray(c,m_c,1,K);
if(nlhs>=4){
double *m_beta = mxGetPr(plhs[3]);
ToArray(beta,m_beta,L,K);
}
if(nlhs>=5){
double *m_Q = mxGetPr(plhs[4]);
ToArray(Q,m_Q,L,K);
}
if(nlhs==6){
double *m_NorQ = mxGetPr(plhs[5]);
ToArray(NorQ,m_NorQ,K,1);
}
}
else
{
mexErrMsgTxt("Error OOMP exited early see message above!!\n");
}
}
I am using this MEX file in a MATLAB program that runs successfully.
Here is the loop that calls the MEX file:
for i=1:Nframes
f=F(:,i)'; %the signals are the columns of F
tol=PRD0*norm(f)/100;
[h, Di0, ~] = OOMP_for_new_Mex(f, D, tol,ind,No);
error_norm(i)= norm(f-h);
K(i)=numel(Di0);
clear Di0 h f tol ;
end
I get good results, but the program uses far too much memory (10 GB!), which is odd because the variables stored in the MATLAB workspace only take up a few MB.
The problem is that new memory is allocated in each iteration of the loop but is not released at the end of it: the objects created with new in the MEX file are never deleted.
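For reference, the missing cleanup at the end of mexFunction would look roughly like this (a sketch only: it assumes Matrix has a working destructor, and it only covers the success path, since mexErrMsgTxt does not return):
// Free the heap-allocated Matrix objects before returning to MATLAB.
delete f;
delete D;
delete mind;
delete h;
delete Di;
delete beta;
delete c;
delete Q;
delete NorQ;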
So my questions are:
1) Do you see anything else that could cause a memory leak in the code?
2) How can I fix it?

Pass array of structs from C++ to GO

I'm trying to get an array of TensorFlow box predictions from C++ into Go, but I haven't managed to do it no matter what I try. I have a Go program that calls a function that runs TensorFlow detections in C++ via cgo. This all works, and I'm able to get the predictions in C++. The problem is transferring these predictions into Go as an array of 100 structs that each hold one prediction.
I'm able to set a pointer in Go and use this pointer address to set one struct in C++. The code for this is shown below.
I want to set an array of structs in C++ and retrieve this array in Go. I thought it would be easy to just use the same pointer address as before and use it as the address of my C++ array, and then restore the structs from the pointer in Go. Does anyone have a solution for this?
GO
type PredictResult struct {
    Loc   [4]float32
    Score int
    Label int
}
var predictions PredictResult
predictions_ptr := unsafe.Pointer(&predictions)
C.LIB_predict(predictions_ptr)
fmt.Println("GO predictions; ", predictions)
bridge.hpp
struct PredictResult{
    float Loc[4];
    int64_t Score;
    int64_t Label;
};
void LIB_predict(void* predictions);
bridge.cpp
void LIB_predict(void* predictions){
    PredictResult *p = (PredictResult*)predictions;
    p->Score = 6;
    p->Label = 95;
}
Prints:
GO predictions; {[0 0 0 0] 6 95}
Assuming your C function returns the array as PredictResult* and assuming you know the length of the returned array (in the example below I assume 10, but you can replace it by whatever works), this approach should work:
package main

// #include <stdio.h>
// #include <stdlib.h>
// #include <stdint.h>
//
// typedef struct PredictResult {
// float Loc[4];
// int64_t Score;
// int64_t Label;
// } PredictResult;
//
// PredictResult* getOneResult() {
// PredictResult* p = (PredictResult*)calloc(1, sizeof(PredictResult));
// p->Score = 10;
// p->Label = 99;
// p->Loc[1] = 2.5;
// p->Loc[3] = 3.5;
// return p;
// }
//
// PredictResult* getTenResults() {
// PredictResult* parr = (PredictResult*)calloc(10, sizeof(PredictResult));
// parr[0].Score = 10;
// parr[0].Label = 99;
// parr[0].Loc[1] = 2.5;
// parr[0].Loc[3] = 3.5;
//
// parr[4].Score = 44;
// parr[4].Label = 123;
// parr[4].Loc[1] = 12.25;
// parr[4].Loc[3] = -40.5;
// return parr;
// }
//
//
import "C"
type PredictResult C.struct_PredictResult
func main() {
    p := C.getOneResult()
    if p == nil {
        log.Fatal("got nil")
    }
    pp := (*PredictResult)(p)
    fmt.Println(pp)
    parr := C.getTenResults()
    if parr == nil {
        log.Fatal("got nil")
    }
    pslice := (*[1 << 28]PredictResult)(unsafe.Pointer(parr))[:10:10]
    fmt.Println(pslice)
}
What you'll be most interested in is how the result of getTenResults is converted to a Go slice of the appropriate struct type. This is employing the technique recommended on the Go wiki.
Depending on the exact signature of your C function you may need to write a "bridge" function in the import "C" part to provide the data as convenient to Go, but this is the basic gist of it.
As an alternative, if you wish to allocate the slice on the Go side and pass in a pointer to C to populate, you can do this:
// void PopulateTenResults(void* arr) {
// PredictResult* parr = (PredictResult*)arr;
// parr[1].Score = 210;
// parr[1].Label = 299;
// parr[1].Loc[1] = 22.5;
// parr[1].Loc[3] = 23.5;
//
// parr[8].Score = 344;
// parr[8].Label = 3123;
// parr[8].Loc[1] = 312.25;
// parr[8].Loc[3] = -340.5;
// }
//
//
import "C"
And then in Go do:
prslice := make([]PredictResult, 10)
C.PopulateTenResults(unsafe.Pointer(&prslice[0]))
fmt.Println(prslice)
Of course the hard-coded 10 is just for simplicity here; you could pass the length of arr as a parameter to C.
You can pass a pointer to the first element in a slice and the length of the slice to C++ and treat it like a C-style array.
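For example, a minimal sketch of that variant (the bridge declaration PopulateResults and its signature are hypothetical here, and the slice length is arbitrary):
// In the cgo preamble (hypothetical):
//   void PopulateResults(void* arr, int n);

prslice := make([]PredictResult, 100)
C.PopulateResults(unsafe.Pointer(&prslice[0]), C.int(len(prslice)))
fmt.Println(prslice[0])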

Removing duplication between similar functions in C and C++, in an EmPy template

The #(...) and #[...] things are EmPy template markup.
C++
const char *
publish__#(spec.base_type.type)(void * untyped_data_writer, const void * untyped_message)
{
DataWriter * topic_writer = static_cast<DataWriter *>(untyped_data_writer);
const __ros_msg_type & ros_message = *(const __ros_msg_type *)untyped_message;
__dds_msg_type dds_message;
conversion_cpp(ros_message, dds_message);
#(__dds_msg_type_prefix)DataWriter * data_writer =
#(__dds_msg_type_prefix)DataWriter::_narrow(topic_writer);
DDS::ReturnCode_t status = data_writer->write(dds_message, DDS::HANDLE_NIL);
// some common switch statements in C and C++
}
}
C
static const char *
publish(void * data_writer, const void * cool_message)
{
if (!data_writer) {return "data writer handle is null";}
if (!cool_message) {return "ros message handle is null";}
DDS::DataWriter * topic_writer = static_cast<DDS::DataWriter *>(data_writer);
__dds_msg_type dds_message;
const char * err_msg = conversion_c(cool_message, &dds_message);
if (err_msg != 0) {return err_msg;}
#(__dds_msg_type_prefix)DataWriter * data_writer =
#(__dds_msg_type_prefix)DataWriter::_narrow(topic_writer);
DDS::ReturnCode_t status = data_writer->write(dds_message, DDS::HANDLE_NIL);
#[for field in spec.fields]#
#[if field.type.type == 'string']#
#[if field.type.is_array]#
{
#[if field.type.array_size]#
size_t size = #(field.type.array_size);
#[else]#
size_t size = dds_message.#(field.name)_.length();
#[end if]#
for (DDS::ULong i = 0; i < size; ++i) {
// This causes the DDS::String_mgr to release the given c string without freeing it.
dds_message.#(field.name)_[i]._retn();
}
}
#[else]#
// This causes the DDS::String_mgr to release the given c string without freeing it.
dds_message.#(field.name)_._retn();
#[end if]#
#[end if]#
#[end for]#
// some common switch statements in C and C++
}
}
This question is a bit specific to an open source project I am trying to contribute to, so I'll point to the exact functions, I guess.
This is the original C method
and this is the C++ method
Do I need to use function pointers?
Another thing going on here is that the C package depends on the C++ package.
(Maybe this isn't a good question, or it's a vague one, but I am not sure what to do as I am new to this codebase.)

Building custom theories in Z3

This question is a follow-up to the following question.
Procedural Attachment in Z3
I have a predicate (I use the name "heavier" in this case) over two integers that I need to evaluate using a custom algorithm. I have written the following piece of code to do it, but I see that the parameters passed into the function CMTh_reduce_app() are not actual integers, but constants of integer sort. What I need is two integer values, so that I can evaluate the predicate and return the result. (The operations done in CMTh_reduce_app() right now are meaningless.)
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<stdarg.h>
#include<memory.h>
#include<z3.h>
#include "z3++.h"
#include<iostream>
using namespace z3;
using namespace std;
struct _CMTheoryData {
Z3_func_decl heavier;
};
typedef struct _CMTheoryData CMTheoryData;
Z3_context ctx;
//Exit function
void exitf(const char* message)
{
fprintf(stderr,"BUG: %s.\n", message);
exit(1);
}
//Check and print model if available
void check(Z3_context ctx)
{
Z3_model m = 0;
Z3_lbool result = Z3_check_and_get_model(ctx, &m);
switch (result) {
case Z3_L_FALSE:
printf("unsat\n");
break;
case Z3_L_UNDEF:
printf("unknown\n");
printf("potential model:\n%s\n", Z3_model_to_string(ctx, m));
break;
case Z3_L_TRUE:
printf("sat\n%s\n", Z3_model_to_string(ctx, m));
break;
}
if (m) {
Z3_del_model(ctx, m);
}
}
//Create logical context. Enable model generation, and set error handler
void error_handler(Z3_error_code e)
{
printf("Error code: %d\n", e);
exitf("incorrect use of Z3");
}
Z3_context mk_context_custom(Z3_config cfg, Z3_error_handler err)
{
Z3_context ctx;
Z3_set_param_value(cfg, "MODEL", "true");
ctx = Z3_mk_context(cfg);
#ifdef TRACING
Z3_trace_to_stderr(ctx);
#endif
Z3_set_error_handler(ctx, err);
return ctx;
}
Z3_context mk_context()
{
Z3_config cfg;
Z3_context ctx;
cfg = Z3_mk_config();
ctx = mk_context_custom(cfg, error_handler);
Z3_del_config(cfg);
return ctx;
}
//Shortcut for binary fn application
Z3_ast mk_binary_app(Z3_context ctx, Z3_func_decl f, Z3_ast x, Z3_ast y)
{
Z3_ast args[2] = {x, y};
return Z3_mk_app(ctx, f, 2, args);
}
//Shortcut to create an int
Z3_ast mk_int(Z3_context ctx, int v)
{
Z3_sort ty = Z3_mk_int_sort(ctx);
return Z3_mk_int(ctx, v, ty);
}
Z3_ast mk_var(Z3_context ctx, const char * name, Z3_sort ty)
{
Z3_symbol s = Z3_mk_string_symbol(ctx, name);
return Z3_mk_const(ctx, s, ty);
}
Z3_ast mk_int_var(Z3_context ctx, const char * name)
{
Z3_sort ty = Z3_mk_int_sort(ctx);
return mk_var(ctx, name, ty);
}
//Callback when final check is to be carried out
Z3_bool CMTh_final_check(Z3_theory t) {
printf("Final check\n");
return Z3_TRUE;
}
//Callback when theory is to be deleted
void CMTh_delete(Z3_theory t) {
CMTheoryData * td = (CMTheoryData *)Z3_theory_get_ext_data(t);
printf("Delete\n");
free(td);
}
//Callback to reduce a function application(definition of custom functions, predicates)
Z3_bool CMTh_reduce_app(Z3_theory t, Z3_func_decl d, unsigned n, Z3_ast const args[], Z3_ast * result) {
CMTheoryData * td = (CMTheoryData*)Z3_theory_get_ext_data(t);
cout<<Z3_ast_to_string(ctx, args[0])<<' '<<Z3_ast_to_string(ctx,args[1])<<endl;
if (d == td->heavier) {
cout<<"Reducing the fn \'heavier\'"<<endl;
if(Z3_is_eq_ast(ctx,mk_int(ctx, 1),args[0])||Z3_is_eq_ast(ctx,mk_int(ctx,2),args[0]))
{
*result = Z3_mk_true(Z3_theory_get_context(t));
return Z3_TRUE;
}
else
{
*result = Z3_mk_false(Z3_theory_get_context(t));
return Z3_TRUE;
}
}
return Z3_FALSE; // failed to simplify
}
Z3_theory mk_cm_theory(Z3_context ctx) {
Z3_sort heavier_domain[2];
Z3_symbol heavier_name = Z3_mk_string_symbol(ctx, "heavier");
Z3_sort B = Z3_mk_bool_sort(ctx);
CMTheoryData * td = (CMTheoryData*)malloc(sizeof(CMTheoryData));
Z3_theory Th = Z3_mk_theory(ctx, "cm_th", td);
heavier_domain[0] = Z3_mk_int_sort(ctx);
heavier_domain[1] = Z3_mk_int_sort(ctx);
td->heavier = Z3_theory_mk_func_decl(ctx, Th, heavier_name, 2, heavier_domain, B); //context, theory, name_of_fn, number of arguments, argument type list, return type
Z3_set_delete_callback(Th, CMTh_delete);
Z3_set_reduce_app_callback(Th, CMTh_reduce_app);
Z3_set_final_check_callback(Th, CMTh_final_check);
return Th;
}
int main(void)
{
Z3_ast a_ast, b_ast, c_ast, f1, f3, r;
Z3_sort i;
Z3_pattern p;
Z3_app bound[2];
Z3_theory Th;
CMTheoryData * td;
printf("\nCustom theory example\n");
ctx = mk_context();
Th = mk_cm_theory(ctx);
td = (CMTheoryData*)Z3_theory_get_ext_data(Th);
a_ast = mk_int_var(ctx, "a");
b_ast = mk_int_var(ctx, "b");
bound[0] = (Z3_app)a_ast;
f1=mk_binary_app(ctx, td->heavier, a_ast, b_ast);
r= Z3_mk_exists_const(ctx, 0, 1, bound, 0, 0,f1);
printf("assert axiom:\n%s\n", Z3_ast_to_string(ctx, r));
Z3_assert_cnstr(ctx, r);
check(ctx);
}
I know the user theory plugin is not supported anymore, but I really need to get this working, so if I could get any information, it would be really helpful. I tried looking at the source code, but I didn't know where to get started with building new theories into it. So, I'd appreciate some help with the theory plugin.
Models are not going to be accessible to you from the abstraction that the deprecated theory plugin provides. The problem is that models are constructed later in the game. It would require rewriting some of the internals to accommodate this (it is not impossible, but a very fair chunk of work).
My impression is that it would be simpler to use just the basic interaction with Z3, where you declare the predicates as uninterpreted and check for SAT. Then, if the current constraints are satisfiable, use the current model to evaluate the arguments. If you have values that contradict your built-in procedural attachment, then assert new facts that rule these values out (and as many other infeasible values as possible). I call this the "lazy loop approach".
This interaction model corresponds to how SMT solvers can use SAT solvers without providing theory propagation (propagating truth values when new atoms are assigned). You would have to do a bit more work during conflict analysis/resolution in order to produce strong lemmas, so a hybrid between the built-in theory and the lazy loop approach may work out in the end. But before getting there, I suggest just using Z3 as is and using the current model to calculate new blocking clauses.
Of course you lose something: instantiation of quantifiers will proceed somewhat eagerly, and it could very well be the case that this lazy loop approach will not work well in the presence of quantifiers.
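To make the lazy loop concrete, here is a minimal sketch using the current z3++ API (no theory plugin); the bounds on a and b and the my_heavier() placeholder are assumptions for illustration only:
#include <iostream>
#include "z3++.h"

// Placeholder for the custom procedural check; an assumption of this sketch.
static bool my_heavier(int x, int y) { return x > y; }

int main() {
    z3::context c;
    z3::solver s(c);
    z3::expr a = c.int_const("a");
    z3::expr b = c.int_const("b");
    // 'heavier' stays uninterpreted as far as Z3 is concerned.
    z3::func_decl heavier = c.function("heavier", c.int_sort(), c.int_sort(), c.bool_sort());
    s.add(heavier(a, b));
    s.add(a >= 0 && a <= 10 && b >= 0 && b <= 10); // toy bounds, just for illustration
    while (s.check() == z3::sat) {
        z3::model m = s.get_model();
        int av = m.eval(a, true).get_numeral_int();
        int bv = m.eval(b, true).get_numeral_int();
        if (my_heavier(av, bv)) { // the model agrees with the procedural check
            std::cout << "consistent model: a = " << av << ", b = " << bv << "\n";
            return 0;
        }
        // The model contradicts the procedural attachment: block this assignment
        // (a stronger lemma ruling out more infeasible values would be better).
        s.add(!(a == c.int_val(av) && b == c.int_val(bv)));
    }
    std::cout << "no model consistent with the procedural attachment\n";
    return 0;
}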

How to use Given/When/Then pattern with mocks (Google Mock)?

I'm using the Given/When/Then pattern to make test code much clearer. Since I'm writing those tests in C++, I chose to use Google Test. With ordinary tests the pattern is clear, because I do something like this:
TEST(TestFixture, TestName)
{
    // Given
    int a = 5;
    int b = 6;
    int expectedResult = 30;
    // When
    int result = Multiply(a, b);
    // Then
    EXPECT_EQ(expectedResult, result);
}
But with mocks it stops being clear, since some EXPECTs appear in the Given part. The Given part is supposed to be a setup step. Please see an example:
TEST(TestFixture, TestName)
{
    // Given
    int a = 5;
    int b = 6;
    int expectedResult = 30;
    MightCalculatorMock mock;
    EXPECT_CALL(mock, multiply(a,b))
        .WillOnce(Return(expectedResult));
    // When
    int result = Multiply(mock, a, b);
    // Then
    EXPECT_EQ(expectedResult, result);
}
Is this approach correct? How and where should the Given/When/Then comments be placed in the test code?
The EXPECT_CALL macro can be thought of as a way of testing the interaction between one class and another. As such, if you are using it alongside another EXPECT macro, then your test is likely testing two things, which is why it appears to conflict with the "Given-When-Then" paradigm (also known as "Arrange-Act-Assert").
If you just need to set up some behavior on your mock object for testing, use the ON_CALL macro instead:
TEST(TestFixture, TestName)
{
    // Given
    int a = 5;
    int b = 6;
    int expectedResult = 30;
    MightCalculatorMock mock;
    ON_CALL(mock, multiply(a,b))
        .WillByDefault(Return(expectedResult));
    // When
    int result = Multiply(mock, a, b);
    // Then
    EXPECT_EQ(expectedResult, result);
}
If you are actually looking to test the interaction between your system under test and some other collaborator, you can use an "Arrange-Expect-Act" pattern:
TEST(TestFixture, CalculatorIsCalledProperly)
{
    // Arrange
    int a = 5;
    int b = 6;
    int expectedResult = 30;
    MightCalculatorMock mock;
    // Expect
    EXPECT_CALL(mock, multiply(Eq(a),Eq(b)));
    // Act
    int result = Multiply(mock, a, b);
}