Are Dense/Dropout/Flatten layers available in TensorFlow Lite Micro? - c++

I know the available ops are listed in all_ops_resolver.cc, but there are none for Dropout, Flatten, or Dense.
The magic_wand example trains a model using these layers.
def build_cnn(seq_length):
  """Builds a convolutional neural network in Keras."""
  model = tf.keras.Sequential([
      tf.keras.layers.Conv2D(
          8, (4, 3),
          padding="same",
          activation="relu",
          input_shape=(seq_length, 3, 1)),  # output_shape=(batch, 128, 3, 8)
      tf.keras.layers.MaxPool2D((3, 3)),  # (batch, 42, 1, 8)
      tf.keras.layers.Dropout(0.1),  # (batch, 42, 1, 8)
      tf.keras.layers.Conv2D(16, (4, 1), padding="same",
                             activation="relu"),  # (batch, 42, 1, 16)
      tf.keras.layers.MaxPool2D((3, 1), padding="same"),  # (batch, 14, 1, 16)
      tf.keras.layers.Dropout(0.1),  # (batch, 14, 1, 16)
      tf.keras.layers.Flatten(),  # (batch, 224)
      tf.keras.layers.Dense(16, activation="relu"),  # (batch, 16)
      tf.keras.layers.Dropout(0.1),  # (batch, 16)
      tf.keras.layers.Dense(4, activation="softmax")  # (batch, 4)
  ])
When loading the model I don't see these layers anywhere, and searching the whole codebase didn't bring much clarity.
static tflite::MicroMutableOpResolver<5> micro_op_resolver; // NOLINT
micro_op_resolver.AddConv2D();
micro_op_resolver.AddDepthwiseConv2D();
micro_op_resolver.AddFullyConnected();
micro_op_resolver.AddMaxPool2D();
micro_op_resolver.AddSoftmax();
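One way to check what the converter actually emitted is to inspect the .tflite file itself; the resolver only needs the ops that survive conversion. A minimal sketch, assuming a recent TF release for the (experimental) analyzer API and a hypothetical model.tflite path:

import tensorflow as tf

# Convert the trained Keras model the same way the magic_wand scripts do.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
with open("model.tflite", "wb") as f:
    f.write(converter.convert())

# List the ops in the converted graph. Dropout is training-only and is removed
# during conversion; Flatten typically lowers to a RESHAPE op and Dense to
# FULLY_CONNECTED, which would explain AddFullyConnected() in the resolver.
tf.lite.experimental.Analyzer.analyze(model_path="model.tflite")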

Related

How to take the absolute value in an objective function solved using Pyomo and GLPK

I have to find the optimum cost of building links between nodes. In my objective function, I am trying to minimise the cost. The problem can be solved to determine the variables, but the optimal value of my cost is incorrect, because I want it to take the absolute value of each cost term. How can I modify my code, given that I cannot use abs() in the objective function?
cost += (model.a[i,k] - model.a[j,k]) * model.c * model.d[i,j]
This term can be negative if model.a[j,k] = 1, or positive if model.a[i,k] = 1.
from pyomo.environ import *
# Creation of a Concrete Model
model = ConcreteModel()
# Sets
model.i = Set(initialize=[1,2,3,4,5,6,7,8,9,10,11,12,13], doc='Nodes')
model.k = Set(initialize=['Orange','SFR', 'Bouygues'], doc='Companies')
# Parameters
model.c = Param(initialize=25, doc='Cost of transforming an existing link into a backbone link in euro/km')
links={
(1, 2) : 1.8,
(1, 7) : 1,
(1, 13) : 5.4,
(2, 8) : 2.3,
(2, 3) : 1.7,
(2, 5) : 7,
(2, 7) : 2,
(2, 12) : 3,
(3, 4) : 2,
(3, 10) : 6.5,
(4, 5) : 1,
(4, 6) : 2,
(5, 8) : 5,
(5, 10) : 1,
(5, 11) : 1.5,
(6, 11) : 2.1,
(7, 12) : 2,
(8, 9) : 2,
(8, 13) : 0.7,
(9, 10) : 1.1,
(10, 11) : 1,
(12, 13) : 2.5,
}
model.d=Param(model.i, model.i,default=0, initialize=links, doc='distance in 10 km between nodes')
# Variables
model.a = Var(model.i, model.k, within=Binary, doc='Binary variable indicating whether node i belongs to company k (0 if it does not belong and 1 if it belongs)')
#Constraints#
def allocation_rule(model, i):
    return sum(model.a[i,k] for k in model.k) == 1
model.allocation = Constraint(model.i, rule=allocation_rule, doc='Each node can only belong to one company')
def minimum_rule(model, k):
    return sum(model.a[i,k] for i in model.i) >= 2
model.minimum = Constraint(model.k, rule=minimum_rule, doc='Each company must have at least 2 nodes')
#objective
def totalcost(model):
    cost = 0
    for i in model.i:
        for j in model.i:
            if model.d[i,j] != 0:
                for k in model.k:
                    cost += (model.a[i,k]-model.a[j,k])*model.c*model.d[i,j]
    return cost
model.z = Objective(rule=totalcost, sense=minimize, doc='Minimize the cost of implementing a backbone connecting the three sub-networks')
def total(model):
    return model.cost_postive-model.cost_negative
## Display of the output ##
optimizer = SolverFactory("glpk",executable='/usr/bin/glpsol') #creates an optimizer object that uses the glpk package installed to your usr/bin.
optimizer.solve(model) #tells your optimizer to solve the model object
model.display()
I have tried cost += abs((model.a[i,k]-model.a[j,k])*model.c*model.d[i,j]), but this makes the problem non-linear, so it cannot be solved.
Edit: I introduced a new variable p and added two constraints, p >= (model.a[i,k]-model.a[j,k])*model.c*model.d[i,j] and
p >= -(model.a[i,k]-model.a[j,k])*model.c*model.d[i,j] (a cleaned-up sketch of this linearization follows the code below). However, it returns the error ERROR:pyomo.core:Rule failed for Param 'd' with index (1, 2):
from pyomo.environ import *
# Creation of a Concrete Model
model = ConcreteModel()
# Sets
model.i = Set(initialize=[1,2,3,4,5,6,7,8,9,10,11,12,13], doc='Nodes')
model.i = Set(initialize=['Orange','SFR', 'Bouygues'], doc='Companies')
# Parameters
model.c = Param(initialize=25, doc='Cost of transforming an existing link into a backbone link in euro/km')
links={
(1, 2) : 1.8,
(1, 7) : 1,
(2, 3) : 1.7,
(2, 5) : 7,
(2, 7) : 2,
(2, 12) : 3,
(3, 4) : 2,
(3, 10) : 6.5,
(4, 5) : 1,
(4, 6) : 2,
(5, 8) : 5,
(5, 10) : 1,
(5, 11) : 1.5,
(6, 11) : 2.1,
(7, 12) : 2,
(8, 9) : 2,
(8, 13) : 0.7,
(9, 10) : 1.1,
(10, 11) : 1,
(12, 13) : 2.5,
(1, 13) : 5.4,
(2, 8) : 2.3,
}
model.d=Param(model.i, model.i,default=0, initialize=links, doc='distance in 10 km between nodes')
# Variables
model.a = Var(model.i, model.k, within=Binary, doc='Binary variable indicating whether node i belongs to company k (0 if it does not belong and 1 if it belongs)')
model.p = Var(model.i,model.k, within=(0.0,None), doc='Cost of building backbone link p_ij')
#Constraints#
def allocation_rule(model, i):
    return sum(model.a[i,k] for k in model.k) == 1
model.allocation = Constraint(model.i, rule=allocation_rule, doc='Each node can only belong to one company')
def minimum_rule(model, k):
    return sum(model.a[i,k] for i in model.i) >= 2
model.minimum = Constraint(model.k, rule=minimum_rule, doc='Each company must have at least 2 nodes')
def absolute_rule1(model):
    return model.p >= (model.a[i,k]-model.a[j,k])*model.c*model.d[i,j]
model.absolute1 = Constraint(model.i, rule=absolute_rule1, doc='To take the positive cost')
def absolute_rule2(model):
    for i in model.i:
        for j in model.i:
            if model.d[i,j] != 0:
                for k in model.k:
                    return model.p >= -(model.a[i,k]-model.a[j,k])*model.c*model.d[i,j]
model.absolute2 = Constraint(model.i, rule=absolute_rule2, doc='To take the positive cost')
#objective
def totalcost(model):
    cost = 0
    for i in model.i:
        for j in model.i:
            if model.d[i,j] != 0:
                for k in model.k:
                    cost += model.p
    return cost
model.z = Objective(rule=totalcost, sense=minimize, doc='Minimize the cost of implementing a backbone connecting the three sub-networks')
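For reference, a minimal sketch of that linearization with p indexed per node pair and company (this replaces the scalar-style p above; it is a hypothetical snippet, untested against the full model, and as the answer below argues, the formulation itself probably double-counts):

from pyomo.environ import Constraint, NonNegativeReals, Objective, Var, minimize

# p[i,j,k] >= |a[i,k] - a[j,k]| * c * d[i,j], linearized with two inequalities
model.p = Var(model.i, model.i, model.k, within=NonNegativeReals, doc='Absolute cost term')

def absolute_rule1(model, i, j, k):
    if model.d[i, j] == 0:
        return Constraint.Skip  # no link between i and j
    return model.p[i, j, k] >= (model.a[i, k] - model.a[j, k]) * model.c * model.d[i, j]
model.absolute1 = Constraint(model.i, model.i, model.k, rule=absolute_rule1)

def absolute_rule2(model, i, j, k):
    if model.d[i, j] == 0:
        return Constraint.Skip
    return model.p[i, j, k] >= -(model.a[i, k] - model.a[j, k]) * model.c * model.d[i, j]
model.absolute2 = Constraint(model.i, model.i, model.k, rule=absolute_rule2)

# the objective then sums the linearized absolute terms instead of the raw differences
model.z = Objective(expr=sum(model.p[i, j, k] for i in model.i for j in model.i
                             for k in model.k if model.d[i, j] != 0),
                    sense=minimize)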
Below is a slightly modified approach.
You could put in the helper variables to get to absolute value, but I think that might lead you a bit astray in your objective, as I mentioned in the comment. Specifically, if you have 3 companies, the best you could do for "ownership" would be 1 company owning it, so as you summed over all three companies, you would get one "zero" cost and two actual costs, which is probably not desired.
I reformulated a bit to something which kinda does the same thing with a couple new variables. Realize there is "upward pressure" in the model for link ownership... cost is reduced (good) if more links are owned, so the variable I put in assesses each link by company and only allows ownership if they own both nodes.
The other new variable indicates whether a link is owned or not, independent of company. I think you could probably do without that, but it adds a little clarity. You could get the same thing (remove the variable, I think) by observing:
build_link >= 1 - sum(own_link)
Also, a reminder... I didn't see in your original code that you were inspecting the solver results. Always, always, always do that to ensure the status is "optimal" or you are looking at junk response.
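For example, a minimal guard using the result object captured in the code below might look like this:

from pyomo.opt import SolverStatus, TerminationCondition

result = optimizer.solve(model)
# bail out unless the solver reports a clean, optimal finish
if (result.solver.status != SolverStatus.ok or
        result.solver.termination_condition != TerminationCondition.optimal):
    raise RuntimeError(f"solve failed: {result.solver.termination_condition}")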
Code:
from pyomo.environ import *
links={
(1, 2) : 1.8,
(1, 7) : 1,
(1, 13) : 5.4,
(2, 8) : 2.3,
(2, 3) : 1.7,
(2, 5) : 7,
(2, 7) : 2,
(2, 12) : 3,
(3, 4) : 2,
(3, 10) : 6.5,
(4, 5) : 1,
(4, 6) : 2,
(5, 8) : 5,
(5, 10) : 1,
(5, 11) : 1.5,
(6, 11) : 2.1,
(7, 12) : 2,
(8, 9) : 2,
(8, 13) : 0.7,
(9, 10) : 1.1,
(10, 11) : 1,
(12, 13) : 2.5,
}
# Creation of a Concrete Model
model = ConcreteModel()
# Sets
model.i = Set(initialize=[1,2,3,4,5,6,7,8,9,10,11,12,13], doc='Nodes')
model.k = Set(initialize=['Orange','SFR', 'Bouygues'], doc='Companies')
model.links = Set(within=model.i*model.i, initialize=links.keys())
# Parameters
model.c = Param(initialize=25, doc='Cost of transforming an existing link into a backbone link in euro/km')
model.d = Param(model.links, default=0, initialize=links, doc='distance in 10 km between nodes')
# Variables
model.a = Var(model.i, model.k, within=Binary, doc='Binary variable indicating whether node i belongs to company k (0 if it does not belong and 1 if it belongs)')
model.own_link = Var(model.links, model.k, within=Binary, doc='Own the link')
model.build_link = Var(model.links, within=Binary, doc='build link')
#Constraints#
def allocation_rule(model, i):
    return sum(model.a[i,k] for k in model.k) == 1
model.allocation = Constraint(model.i, rule=allocation_rule, doc='Each node can only belong to one company')
def minimum_rule(model, k):
    return sum(model.a[i,k] for i in model.i) >= 2
model.minimum = Constraint(model.k, rule=minimum_rule, doc='Each company must have at least 2 nodes')
def link_owner(model, k, n1, n2):
    return model.own_link[n1, n2, k] <= 0.5 * (model.a[n1, k] + model.a[n2, k])
model.link1 = Constraint(model.k, model.links, rule=link_owner)
# link the "build link" variable to lack of link ownership
def link_build(model, *link):
    return model.build_link[link] >= 1 - sum(model.own_link[link, k] for k in model.k)
model.build_constraint = Constraint(model.links, rule=link_build)
# objective
cost = sum(model.build_link[link]*model.c*model.d[link] for link in model.links)
model.z = Objective(expr=cost, sense=minimize, doc='Minimize the cost of implementing a backbone connecting the three sub-networks')
## Display of the output ##
optimizer = SolverFactory("glpk") #creates an optimizer object that uses the glpk package installed to your usr/bin.
result = optimizer.solve(model) #tells your optimizer to solve the model object
print(result)
print('Link Ownership Plan:')
for idx in model.own_link.index_set():
    if model.own_link[idx].value:  # will be true if it is 1, false if 0
        print(idx, model.own_link[idx].value)
print('\nLink Build Plan:')
for idx in model.build_link.index_set():
    if model.build_link[idx].value:  # will be true if it is 1, false if 0
        print(idx, model.build_link[idx].value)
Output:
Problem:
- Name: unknown
  Lower bound: 232.5
  Upper bound: 232.5
  Number of objectives: 1
  Number of constraints: 105
  Number of variables: 128
  Number of nonzeros: 365
  Sense: minimize
Solver:
- Status: ok
  Termination condition: optimal
  Statistics:
    Branch and bound:
      Number of bounded subproblems: 2183
      Number of created subproblems: 2183
  Error rc: 0
  Time: 0.21333098411560059
Solution:
- number of solutions: 0
  number of solutions displayed: 0
Link Ownership Plan:
(1, 2, 'Orange') 1.0
(1, 7, 'Orange') 1.0
(1, 13, 'Orange') 1.0
(2, 8, 'Orange') 1.0
(2, 5, 'Orange') 1.0
(2, 7, 'Orange') 1.0
(2, 12, 'Orange') 1.0
(3, 10, 'SFR') 1.0
(4, 6, 'Bouygues') 1.0
(5, 8, 'Orange') 1.0
(6, 11, 'Bouygues') 1.0
(7, 12, 'Orange') 1.0
(8, 9, 'Orange') 1.0
(8, 13, 'Orange') 1.0
(12, 13, 'Orange') 1.0
Link Build Plan:
(2, 3) 1.0
(3, 4) 1.0
(4, 5) 1.0
(5, 10) 1.0
(5, 11) 1.0
(9, 10) 1.0
(10, 11) 1.0

Pytorch tensor dimension multiplication

I'm trying to implement the Grad-CAM algorithm:
https://arxiv.org/pdf/1610.02391.pdf
My arguments are:
activations: Tensor with shape torch.Size([1, 512, 14, 14])
alpha values : Tensor with shape torch.Size([512])
I want to multiply each activation (along dimension index 1, sized 512) by its corresponding alpha value: for example, if the i-th of the 512 activations is 4 and the i-th alpha value is 5, then my new i-th activation would be 20.
The shape of the output should be torch.Size([1, 512, 14, 14])
Assuming the desired output is of shape (1, 512, 14, 14).
You can achieve this with torch.einsum:
torch.einsum('nchw,c->nchw', x, y)
Or with a plain broadcasted multiplication, but you will first need to add a couple of additional singleton dimensions to y:
x*y[None, :, None, None]
Here's an example with x.shape = (1, 4, 2, 2) and y.shape = (4,):
>>> x = torch.arange(16).reshape(1, 4, 2, 2)
>>> x
tensor([[[[ 0,  1],
          [ 2,  3]],

         [[ 4,  5],
          [ 6,  7]],

         [[ 8,  9],
          [10, 11]],

         [[12, 13],
          [14, 15]]]])
>>> y = torch.arange(1, 5)
>>> y
tensor([1, 2, 3, 4])
>>> x*y[None, :, None, None]
tensor([[[[ 0,  1],
          [ 2,  3]],

         [[ 8, 10],
          [12, 14]],

         [[24, 27],
          [30, 33]],

         [[48, 52],
          [56, 60]]]])
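Applied to the shapes in the question, a quick sanity check with hypothetical random tensors:

import torch

activations = torch.randn(1, 512, 14, 14)
alphas = torch.randn(512)

# broadcasted element-wise product; einsum computes the same thing
weighted = activations * alphas[None, :, None, None]
assert weighted.shape == (1, 512, 14, 14)
assert torch.allclose(weighted, torch.einsum('nchw,c->nchw', activations, alphas))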

How to select rows by a column value in D with mir.ndslice?

I am browsing through mir.ndslice docs trying to figure out how to do a simple row selection by column.
In numpy I would do:
a = np.random.randint(0, 20, [4, 6])
# array([[ 8, 5, 4, 18, 1, 4],
# [ 2, 18, 15, 7, 18, 19],
# [16, 5, 4, 6, 11, 11],
# [15, 1, 14, 6, 1, 4]])
a[a[:,2] > 10] # select rows where the value in column 2 is > 10
# array([[ 2, 18, 15, 7, 18, 19],
# [15, 1, 14, 6, 1, 4]])
Using mir library I naively tried:
import std.range;
import std.random;
import mir.ndslice;
auto a = generate!(() => uniform(0, 20)).take(24).array.sliced(4,6);
// [[12, 19, 3, 10, 19, 11],
// [19, 0, 0, 13, 9, 1],
// [ 0, 0, 4, 13, 1, 2],
// [ 6, 19, 14, 18, 14, 18]]
a[a[0..$,2] > 10];
But got
Error: incompatible types for `((ulong __dollar = a.length();) , a.opIndex(a.opSlice(0LU, __dollar), 2)) > (10)`: `Slice!(int*, 1LU, cast(mir_slice_kind)0)` and `int`
dmd failed with exit code 1.
So, I went through the docs and couldn't find anything that would look like np.where or similar. Is it even possible in mir?

The output dimension of dense layer in tensorflow

I am writing an AlexNet implementation for CIFAR-10 in TensorFlow. The following is the code for the last few layers:
## pool5
pool5 = tf.nn.max_pool(conv5,
                       ksize=[1, 3, 3, 1],
                       strides=[1, 2, 2, 1],
                       padding="SAME",
                       name='pool5')
print_activations(pool5)
# ## Flatten
# pool5=tf.contrib.layers.flatten(pool5, outputs_collections=None, scope=None)
## FC1
fc1=tf.layers.dense(pool5, 4096, activation=tf.nn.relu, trainable=True)
print_activations(fc1)
## FC2
fc2=tf.layers.dense(fc1, 4096, activation=tf.nn.relu, trainable=True)
print_activations(fc2)
## Output
out1=tf.layers.dense(fc2, 10, activation=None, trainable=True)
To my understanding, the output of the last layer should be a tensor of length 10. Considering the batch size, it should be [None, 10]. However, when I inspect out1.shape.dims, I get [None, None, None, 10]. Does anyone know what I am missing? Thanks
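One hedged observation, hinted at by the commented-out line above: tf.layers.dense only contracts the last axis of its input, so a 4-D pool5 stays 4-D all the way through the dense stack. A minimal sketch of the fix, reusing the flatten call that is commented out:

## Flatten first so the dense layers see (batch, features) rather than 4-D maps
pool5_flat = tf.contrib.layers.flatten(pool5)                   # (batch, H*W*C)
fc1 = tf.layers.dense(pool5_flat, 4096, activation=tf.nn.relu)  # (batch, 4096)
fc2 = tf.layers.dense(fc1, 4096, activation=tf.nn.relu)         # (batch, 4096)
out1 = tf.layers.dense(fc2, 10, activation=None)                # (batch, 10)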

Custom layers for recursive interaction between layers

I have a question. I'm trying to build a CNN where the model's output feeds back into the first Dense layer. Can you help?
e.g:
model = Sequential()
model.add(Convolution2D(32, 3, 3, border_mode='same',
                        input_shape=X_train.shape[1:]))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
The output of d2 should feed back in as part of the input to the Dense(512) layer, roughly:
Dense(512 + 10, name='d1')   # d2's 10 outputs are fed in here together with the flattened CNN features
Activation('relu')
Dropout(0.5)
Dense(10, name='d2')         # this output loops back into d1
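A minimal sketch of one way to express this with the tf.keras functional API, unrolling the feedback a single step (the layer names d1/d2 come from the question; the input shape, nb_classes, and the zero-initialised first pass are assumptions):

import tensorflow as tf
from tensorflow.keras import layers, Model

nb_classes = 10
inputs = tf.keras.Input(shape=(32, 32, 3))  # assumed CIFAR-like input
x = layers.Conv2D(32, (3, 3), padding='same', activation='relu')(inputs)
x = layers.Conv2D(32, (3, 3), activation='relu')(x)
x = layers.MaxPooling2D((2, 2))(x)
x = layers.Dropout(0.25)(x)
x = layers.Flatten()(x)

# Shared layers: d1 sees the CNN features concatenated with d2's previous output.
d1 = layers.Dense(512, activation='relu', name='d1')
d2 = layers.Dense(nb_classes, activation='softmax', name='d2')

# Pass 1: no feedback exists yet, so stand in a zero vector for d2's output.
feedback = layers.Lambda(lambda t: tf.zeros_like(t[:, :nb_classes]))(x)
out = d2(layers.Dropout(0.5)(d1(layers.Concatenate()([x, feedback]))))

# Pass 2 (the "recursive" step, unrolled once): feed d2's output back into d1.
out = d2(layers.Dropout(0.5)(d1(layers.Concatenate()([x, out]))))

model = Model(inputs, out)
model.summary()

Because d1 and d2 are shared layer objects, both passes reuse the same weights; a true unbounded recursion is not expressible in a static Keras graph, so unrolling a fixed number of steps (or writing a custom layer/RNN cell) is the usual workaround.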