I wrote a sparse matrix class based on block compressed storage. I have written almost all of the methods, but I have no idea how to write the method findValue(i,j), which takes two indices of the original matrix and returns the value stored there. The storage consists of four vectors:
ba_ stores the non-zero blocks (rectangular blocks in which at least one element is different from zero) of the matrix in top-down, left-right order.
an_ is the vector of indices that point to the first element of each block inside the vector ba_.
aj_ stores the column index of each block in the blocked matrix.
ai_ stores the first block of each row in the blocked matrix.
The picture accompanying the post clarifies the layout; a worked example is given below.
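For concreteness, here is what the four vectors look like for the 6x6 matrix bbcsr1 that appears in the main below, with 2x2 blocks (worked out by hand from the rules above; blocks are stored row-major, and an_, aj_, ai_ use 1-based indices as in the constructor):

dense matrix:                blocked matrix (1 = stored block):
11 12 13 14  0  0
 0 22 23  0  0  0            1 1 0
 0  0 33 34 35 36            0 1 1
 0  0  0 44 45  0            0 0 1
 0  0  0  0  0 56
 0  0  0  0  0 66

ba_ : 11 12 0 22   13 14 23 0   33 34 0 44   35 36 45 0   0 56 0 66
an_ : 1 5 9 13 17
aj_ : 1 2 2 3 3
ai_ : 1 3 5 6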
Here is the class, in which I use two methods to achieve the result, findBlockIndex and findValue(i,j,Brows,Bcols); but I need to get the value at the original position using findValue(i,j), where i and j are the indices in the full sparse matrix.
#include <iosfwd>
#include <vector>
#include <string>
#include <initializer_list>
#include "MatrixException.H"
#include <sstream>
#include <fstream>
#include <algorithm>
#include <iomanip>
// forward declarations
template <typename T, std::size_t R, std::size_t C>
class BCRSmatrix ;
template <typename T, std::size_t R, std::size_t C>
std::ostream& operator<<(std::ostream& os , const BCRSmatrix<T,R,C>& m );
template <typename T, std::size_t Br, std::size_t Bc >
std::vector<T> operator*(const BCRSmatrix<T,Br,Bc>& m, const std::vector<T>& x );
template <typename data_type, std::size_t BR , std::size_t BC>
class BCRSmatrix {
template <typename T, std::size_t R, std::size_t C>
friend std::ostream& operator<<(std::ostream& os , const BCRSmatrix<T,R,C>& m );
template <typename T, std::size_t Br,std::size_t Bc>
friend std::vector<T> operator*(const BCRSmatrix<T,Br,Bc>& m, const std::vector<T>& x );
public:
constexpr BCRSmatrix(std::initializer_list<std::vector<data_type>> dense );
constexpr BCRSmatrix(const std::string& );
virtual ~BCRSmatrix() = default ;
auto constexpr print_block(const std::vector<std::vector<data_type>>& dense,
std::size_t i, std::size_t j) const noexcept ;
auto constexpr validate_block(const std::vector<std::vector<data_type>>& dense,
std::size_t i, std::size_t j) const noexcept ;
auto constexpr insert_block(const std::vector<std::vector<data_type>>& dense,
std::size_t i, std::size_t j) noexcept ;
auto constexpr printBCRS() const noexcept ;
auto constexpr printBlockMatrix() const noexcept ;
auto constexpr size1() const noexcept { return denseRows ;}
auto constexpr size2() const noexcept { return denseCols ;}
auto constexpr printBlock(std::size_t i) const noexcept ;
auto constexpr print() const noexcept ;
private:
std::size_t bn ;
std::size_t bBR ;
std::size_t nnz ;
std::size_t denseRows ;
std::size_t denseCols ;
std::vector<data_type> ba_ ;
std::vector<std::size_t> an_ ;
std::vector<std::size_t> ai_ ;
std::vector<std::size_t> aj_ ;
std::size_t index =0 ;
auto constexpr findBlockIndex(const std::size_t r, const std::size_t c) const noexcept ;
auto constexpr recomposeMatrix() const noexcept ;
auto constexpr findValue(
const std::size_t i, const std::size_t j,
const std::size_t rBlock, const std::size_t cBlock
) const noexcept ;
};
//--------------------------- IMPLEMENTATION
template <typename T, std::size_t BR, std::size_t BC>
constexpr BCRSmatrix<T,BR,BC>::BCRSmatrix(std::initializer_list<std::vector<T>> dense_ )
{
this->denseRows = dense_.size();
auto it = *(dense_.begin());
this->denseCols = it.size();
if( denseRows % BR != 0 || denseCols % BC != 0 )
{
throw InvalidSizeException("Error: block size does not evenly divide the dense matrix size");
}
std::vector<std::vector<T>> dense(dense_);
bBR = BR*BC ;
bn = denseRows*denseCols/(BR*BC) ;
ai_.resize(denseRows/BR +1);
ai_[0] = 1;
for(std::size_t i = 0; i < dense.size() / BR ; i++)
{
auto rowCount =0;
for(std::size_t j = 0; j < dense[i].size() / BC ; j++)
{
if(validate_block(dense,i,j))
{
aj_.push_back(j+1);
insert_block(dense, i, j);
rowCount ++ ;
}
}
ai_[i+1] = ai_[i] + rowCount ;
}
printBCRS();
}
template <typename T, std::size_t BR, std::size_t BC>
constexpr BCRSmatrix<T,BR,BC>::BCRSmatrix(const std::string& fname)
{
std::ifstream f(fname , std::ios::in);
if(!f)
{
throw OpeningFileException("error opening file in constructor !");
}
else
{
std::vector<std::vector<T>> dense;
std::string line, tmp;
T elem = 0 ;
std::vector<T> row;
std::size_t i=0, j=0 ;
while(getline(f, line))
{
row.clear();
std::istringstream ss(line);
if(i==0)
{
while(ss >> elem)
{
row.push_back(elem);
j++;
}
}
else
{
while(ss >> elem)
row.push_back(elem);
}
dense.push_back(row);
i++;
}
this->denseRows = i;
this->denseCols = j;
bBR = BR*BC ;
bn = denseRows*denseCols/(BR*BC) ;
ai_.resize(denseRows/BR +1);
ai_[0] = 1;
for(std::size_t i = 0; i < dense.size() / BR ; i++)
{
auto rowCount =0;
for(std::size_t j = 0; j < dense[i].size() / BC ; j++)
{
if(validate_block(dense,i,j))
{
aj_.push_back(j+1);
insert_block(dense, i, j);
rowCount ++ ;
}
}
ai_[i+1] = ai_[i] + rowCount ;
}
}
printBCRS();
}
template <typename T,std::size_t BR, std::size_t BC>
inline auto constexpr BCRSmatrix<T,BR,BC>::printBlockMatrix() const noexcept
{
for(auto i=0 ; i < denseRows / BR ; i++)
{
for(auto j=1 ; j <= denseCols / BC ; j++)
{
std::cout << findBlockIndex(i,j) << ' ' ;
}
std::cout << std::endl;
}
}
template <typename T,std::size_t BR,std::size_t BC>
inline auto constexpr BCRSmatrix<T,BR,BC>::printBlock(std::size_t i) const noexcept
{
auto w = i-1 ;
auto k = 0;
for(std::size_t i = 0 ; i < BR ; ++i)
{
for(std::size_t j=0 ; j < BC ; ++j )
{
std::cout << std::setw(8) << ba_.at(an_.at(w)-1+k) << ' ';
k++;
}
}
}
template <typename T,std::size_t BR, std::size_t BC>
inline auto constexpr BCRSmatrix<T,BR,BC>::print_block(const std::vector<std::vector<T>>& dense,
std::size_t i, std::size_t j) const noexcept
{
for(std::size_t m = i * BR ; m < BR * (i + 1); ++m)
{
for(std::size_t n = j * BC ; n < BC * (j + 1); ++n)
std::cout << dense[m][n] << ' ';
std::cout << '\n';
}
}
template <typename T,std::size_t BR, std::size_t BC>
inline auto constexpr BCRSmatrix<T,BR,BC>::validate_block(const std::vector<std::vector<T>>& dense,
std::size_t i, std::size_t j) const noexcept
{
bool nonzero = false ;
for(std::size_t m = i * BR ; m < BR * (i + 1); ++m)
{
for(std::size_t n = j * BC ; n < BC * (j + 1); ++n)
{
if(dense[m][n] != 0) nonzero = true;
}
}
return nonzero ;
}
template <typename T,std::size_t BR, std::size_t BC>
inline auto constexpr BCRSmatrix<T,BR,BC>::insert_block(const std::vector<std::vector<T>>& dense,
std::size_t i, std::size_t j) noexcept
{
bool firstElem = true ;
for(std::size_t m = i * BR ; m < BR * (i + 1); ++m)
{
for(std::size_t n = j * BC ; n < BC * (j + 1); ++n)
{
if(firstElem)
{
an_.push_back(index+1);
firstElem = false ;
}
ba_.push_back(dense[m][n]);
index ++ ;
}
}
}
template <typename T, std::size_t BR,std::size_t BC>
auto constexpr BCRSmatrix<T,BR,BC>::findBlockIndex(const std::size_t r, const std::size_t c) const noexcept
{
for(auto j= ai_.at(r) ; j < ai_.at(r+1) ; j++ )
{
if( aj_.at(j-1) == c )
{
return j ;
}
}
return std::size_t(0) ; // no block stored at (r,c)
}
template <typename T, std::size_t BR, std::size_t BC>
auto constexpr BCRSmatrix<T,BR,BC>::printBCRS() const noexcept
{
std::cout << "ba_ : " ;
for(auto &x : ba_ )
std::cout << x << ' ' ;
std::cout << std::endl;
std::cout << "an_ : " ;
for(auto &x : an_ )
std::cout << x << ' ' ;
std::cout << std::endl;
std::cout << "aj_ : " ;
for(auto &x : aj_ )
std::cout << x << ' ' ;
std::cout << std::endl;
std::cout << "ai_ : " ;
for(auto &x : ai_ )
std::cout << x << ' ' ;
std::cout << std::endl;
}
template <typename T, std::size_t BR, std::size_t BC>
auto constexpr BCRSmatrix<T,BR,BC>::print() const noexcept
{
//for each BCRS row
for(auto i=0 ; i < denseRows / BR ; i++){
//for each Block sub row.
for(auto rBlock = 0; rBlock < BR; rBlock++){
//for each BCSR col.
for(auto j = 1; j <= denseCols / BC; j++){
//for each Block sub col.
for(auto cBlock = 0; cBlock < BC; cBlock++){
std::cout<< findValue(i, j, rBlock, cBlock) <<'\t';
}
}
std::cout << std::endl;
}
}
}
template <typename T, std::size_t BR,std::size_t BC>
auto constexpr BCRSmatrix<T,BR,BC>::recomposeMatrix() const noexcept
{
std::vector<std::vector<T>> sparseMat(denseRows, std::vector<T>(denseCols, 0));
auto BA_i = 0, AJ_i = 0;
//for each BCSR row
for(auto r = 0; r < denseRows/BR; r++){
//for each Block in row
for(auto nBlock = 0; nBlock < ai_.at(r+1)-ai_.at(r); nBlock++){
//for each subMatrix (Block)
for(auto rBlock = 0; rBlock < BR; rBlock++){
for(auto cBlock = 0; cBlock < BC; cBlock++){
//insert value
sparseMat.at(rBlock + r*BR).at(cBlock + (aj_.at(AJ_i)-1)*BC) = ba_.at(BA_i);
++BA_i;
}
}
++AJ_i;
}
}
return sparseMat;
}
template <typename T, std::size_t BR,std::size_t BC>
auto constexpr BCRSmatrix<T,BR,BC>::findValue(
const std::size_t i, const std::size_t j,
const std::size_t rBlock, const std::size_t cBlock
) const noexcept
{
auto index = findBlockIndex(i,j);
if(index != 0)
return ba_.at(an_.at(index-1)-1 + cBlock + rBlock*BC);
else
return T(0);
}
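To see the index arithmetic at work, here is a call traced by hand against the 6x6 bbcsr1 example worked out at the top of the post: an_.at(index-1)-1 is the 0-based offset of the block's first element in ba_, and cBlock + rBlock*BC selects the element within the row-major block.

findValue(1, 2, 0, 1)              // block row 1, block column 2 (1-based), sub-row 0, sub-column 1
  findBlockIndex(1,2) returns 3    // it is the third stored block
  an_.at(2) = 9                    // so its first element is ba_[8] (0-based)
  ba_.at(8 + 1 + 0*BC) = ba_[9] = 34, which is dense[2][3]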
template <typename T, std::size_t BR,std::size_t BC>
std::ostream& operator<<(std::ostream& os , const BCRSmatrix<T,BR,BC>& m )
{
for(auto i=0 ; i < m.denseRows / BR ; i++)
{
//for each Block sub row.
for(auto rBlock = 0; rBlock < BR; rBlock++)
{
//for each BCSR col.
for(auto j = 1; j <= m.denseCols / BC; j++)
{
//for each Block sub col.
for(auto cBlock = 0; cBlock < BC; cBlock++)
{
os << m.findValue(i, j, rBlock, cBlock) <<'\t';
}
}
os << std::endl;
}
}
return os;
}
template <typename T, std::size_t BR, std::size_t BC>
std::vector<T> operator*(const BCRSmatrix<T,BR,BC>& m, const std::vector<T>& x )
{
std::vector<T> y(m.size1());
if(m.size2() != x.size())
{
std::string to = "x" ;
std::string mess = "Error occured in operator* attempt to perfor productor between op1: "
+ std::to_string(m.size1()) + to + std::to_string(m.size2()) +
" and op2: " + std::to_string(x.size());
throw InvalidSizeException(mess.c_str());
}
else
{
auto brows = m.denseRows/BR ;
auto bnze = m.an_.size() ;
auto z=0;
for(auto b=0 ; b < brows ; b++)
{
for(auto j= m.ai_.at(b) ; j <= m.ai_.at(b+1)-1; j++ )
{
for(auto k=0 ; k < BR ; k++ )
{
for(auto t=0 ; t < BC ; t++)
{
y.at(BR*b+k) += m.ba_.at(z) * x.at(BC*(m.aj_.at(j-1)-1)+t) ;
z++ ;
}
}
}
}
}
return y;
}
And this is the main function:
# include "BCSmatrix.H"
using namespace std;
int main(){
BCRSmatrix<int,2,2> bbcsr1 = {{11,12,13,14,0,0},{0,22,23,0,0,0},{0,0,33,34,35,36},{0,0,0,44,45,0},
{0,0,0,0,0,56},{0,0,0,0,0,66}};
BCRSmatrix<int,2,2> bbcsr2 = {{11,12,0,0,0,0,0,0} ,{0,22,0,0,0,0,0,0} ,{31,32,33,0,0,0,0,0},
{41,42,43,44,0,0,0,0}, {0,0,0,0,55,56,0,0},{0,0,0,0,0,66,67,0},{0,0,0,0,0,0,77,78},{0,0,0,0,0,0,87,88}};
BCRSmatrix<int,2,4> bbcsr3 = {{11,12,0,0,0,0,0,0} ,{0,22,0,0,0,0,0,0} ,{31,32,33,0,0,0,0,0},
{41,42,43,44,0,0,0,0}, {0,0,0,0,55,56,0,0},{0,0,0,0,0,66,67,0},{0,0,0,0,0,0,77,78},{0,0,0,0,0,0,87,88}};
bbcsr3.printBlockMatrix();
bbcsr3.print();
BCRSmatrix<int,2,2> bbcsr4("input17.dat");
bbcsr4.printBlockMatrix();
BCRSmatrix<int,2,4> bbcsr5("input18.dat");
bbcsr5.printBlockMatrix();
cout << bbcsr5 ;
BCRSmatrix<int,4,4> bbcsr6("input18.dat");
bbcsr6.printBlockMatrix();
bbcsr6.print();
cout << bbcsr4 ; //.print();
BCRSmatrix<int,2,4> bbcsr7("input20.dat");
cout << bbcsr7;
bbcsr7.printBlockMatrix();
std::vector<int> v1 = {3,4,0,1,6,8,1,19};
std::vector<int> v01 = {3,4,0,1,6,8,1,19,15,2};
std::vector<int> v2 = bbcsr4 *v1 ;
for(auto& x : v2)
cout << x << ' ' ;
cout << endl;
BCRSmatrix<double,2,2> bbcsr8("input21.dat");
bbcsr8.print() ;
bbcsr8.printBlockMatrix();
return 0;
}
How do I write the method findValue(i,j) that takes the two indices of the original matrix?
It is similar to the previous findValue method:
template <typename T, std::size_t BR,std::size_t BC>
auto constexpr BCRSmatrix<T,BR,BC>::myNewfindValue(const std::size_t i, const std::size_t j) const noexcept{
auto index = findBlockIndex(i/BR, j/BC);
if(index != 0)
return ba_.at(an_.at(index-1)-1 + j%BC + (i%BR)*BC);
else
return T(0);
}
To call this function you have to make a small change to your findBlockIndex: just change if( aj_.at(j-1) == c ) to if( aj_.at(j-1) == c+1 ); then you have to change the for statements in the other functions from for(auto j = 1; j <= ... to for(auto j = 0; j < .... A sketch of the adjusted findBlockIndex is shown below.
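Under that convention, the adjusted findBlockIndex would look roughly like this (a sketch, assuming the rest of the class is unchanged):

template <typename T, std::size_t BR, std::size_t BC>
auto constexpr BCRSmatrix<T,BR,BC>::findBlockIndex(const std::size_t r, const std::size_t c) const noexcept
{
    for(auto j = ai_.at(r) ; j < ai_.at(r+1) ; j++ )
    {
        if( aj_.at(j-1) == c+1 )    // aj_ stores 1-based block columns, c is now 0-based
            return j ;
    }
    return std::size_t(0) ;         // no block stored at (r,c)
}

With that in place, myNewfindValue(i,j) can pass the 0-based block coordinates i/BR and j/BC directly.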
Let me know if there are problems or if this is not the answer you were looking for.
I hope this helps,
best regards, Marco.
Rename the original findValue to findVal, then define a new findValue that takes exactly two indices, defined as follows (I know it's horrible):
template <typename T, std::size_t BS>
T constexpr SqBCSmatrix<T,BS>::findValue(const std::size_t r, const std::size_t c) const noexcept
{
//for each BCRS row
for(auto i=0 ,k=0; i < denseRows / BS ; i++){
//for each Block sub row.
for(auto rBlock = 0; rBlock < BS; k++ ,rBlock++){
//for each BCSR col.
for(auto j = 1 , l=0; j <= denseCols / BS; j++){
//for each Block sub col.
for(auto cBlock = 0; cBlock < BS; l++ , cBlock++){
if(k == r && c == l )
return findVal(i,j,rBlock, cBlock);
}
}
}
}
return 0;
}
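As a quick sanity check (worked out by hand, assuming a square SqBCSmatrix<int,2> built from the same 8x8 data as bbcsr2 below):

SqBCSmatrix<int,2> m = { /* same 8x8 data as bbcsr2 */ };
std::cout << m.findValue(3, 2) << '\n';   // prints 43, the element at row 3, column 2 (0-based)

The scan stops when the global row counter k reaches 3 and the column counter l reaches 2, which lands in the third stored block at sub-row 1, sub-column 0.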
I'm interested in creating a class for storing sparse matrices in Block Compressed Sparse Row format. This storage method consists of subdividing the matrix into square blocks of size sz*sz and storing those blocks in a vector BA; you can find more information at the link.
Basically the matrix is stored using 4 vectors:
BA contains the elements of the submatrices (blocks), stored in top-down, left-right order (the first 2x2 block in the picture is 11,12,0,22).
AN contains the index at which each block starts inside the vector BA (in the picture the block size is 2x2, so it contains 1,5 ...).
AJ contains the column index of each block in the matrix of blocks (the smaller one in the picture).
AI is the row pointer vector: ai[i+1]-ai[i] = number of blocks in the i-th row. A full worked example is shown below.
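For example, taking the 8x8 matrix bbcsr2 that appears in the main below with 2x2 blocks, working the scheme out by hand gives:

BA : 11 12 0 22   31 32 41 42   33 0 43 44   55 56 0 66   0 0 67 0   77 78 87 88
AN : 1 5 9 13 17 21
AJ : 1 1 2 3 4 4
AI : 1 2 4 6 7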
I wrote the constructor that converts a matrix from dense format to BCSR format:
template <typename data_type, std::size_t SZ = 2 >
class BCSRmatrix {
public:
constexpr BCSRmatrix(std::initializer_list<std::vector<data_type>> dense );
auto constexpr validate_block(const std::vector<std::vector<data_type>>& dense,
std::size_t i, std::size_t j) const noexcept ;
auto constexpr insert_block(const std::vector<std::vector<data_type>>& dense,
std::size_t i, std::size_t j) noexcept ;
auto constexpr printBCSR() const noexcept ;
private:
std::size_t bn ;
std::size_t bSZ ;
std::size_t nnz ;
std::size_t denseRows ;
std::size_t denseCols ;
std::vector<data_type> ba_ ;
std::vector<std::size_t> an_ ;
std::vector<std::size_t> ai_ ;
std::vector<std::size_t> aj_ ;
std::size_t index =0 ;
};
template <typename T, std::size_t SZ>
constexpr BCSRmatrix<T,SZ>::BCSRmatrix(std::initializer_list<std::vector<T>> dense_ )
{
this->denseRows = dense_.size();
auto it = *(dense_.begin());
this->denseCols = it.size();
if( denseRows % SZ != 0 || denseCols % SZ != 0 )
{
throw InvalidSizeException("Error: block size does not evenly divide the dense matrix size");
}
std::vector<std::vector<T>> dense(dense_);
bSZ = SZ*SZ ;
bn = denseRows*denseCols/(SZ*SZ) ;
ai_.resize(denseRows/SZ +1);
ai_[0] = 1;
for(std::size_t i = 0; i < dense.size() / SZ ; i++)
{
auto rowCount =0;
for(std::size_t j = 0; j < dense[i].size() / SZ ; j++)
{
if(validate_block(dense,i,j))
{
aj_.push_back(j+1);
insert_block(dense, i, j);
rowCount ++ ;
}
}
ai_[i+1] = ai_[i] + rowCount ;
}
printBCSR();
}
template <typename T,std::size_t SZ>
inline auto constexpr BCSRmatrix<T,SZ>::validate_block(const std::vector<std::vector<T>>& dense,
std::size_t i, std::size_t j) const noexcept
{
bool nonzero = false ;
for(std::size_t m = i * SZ ; m < SZ * (i + 1); ++m)
{
for(std::size_t n = j * SZ ; n < SZ * (j + 1); ++n)
{
if(dense[m][n] != 0) nonzero = true;
}
}
return nonzero ;
}
template <typename T,std::size_t SZ>
inline auto constexpr BCSRmatrix<T,SZ>::insert_block(const std::vector<std::vector<T>>& dense,
std::size_t i, std::size_t j) noexcept
{
//std::size_t value = index;
bool firstElem = true ;
for(std::size_t m = i * SZ ; m < SZ * (i + 1); ++m)
{
for(std::size_t n = j * SZ ; n < SZ * (j + 1); ++n)
{
if(firstElem)
{
an_.push_back(index+1);
firstElem = false ;
}
ba_.push_back(dense[m][n]);
index ++ ;
}
}
}
template <typename T, std::size_t SZ>
auto constexpr BCSRmatrix<T,SZ>::printBCSR() const noexcept
{
std::cout << "ba_ : " ;
for(auto &x : ba_ )
std::cout << x << ' ' ;
std::cout << std::endl;
std::cout << "an_ : " ;
for(auto &x : an_ )
std::cout << x << ' ' ;
std::cout << std::endl;
std::cout << "aj_ : " ;
for(auto &x : aj_ )
std::cout << x << ' ' ;
std::cout << std::endl;
std::cout << "ai_ : " ;
for(auto &x : ai_ )
std::cout << x << ' ' ;
std::cout << std::endl;
}
And the main function to test the class:
# include "BCSRmatrix.H"
using namespace std;
int main(){
BCSRmatrix<int,2> bbcsr2 = {{11,12,0,0,0,0,0,0} ,{0,22,0,0,0,0,0,0} ,{31,32,33,0,0,0,0,0},
{41,42,43,44,0,0,0,0}, {0,0,0,0,55,56,0,0},{0,0,0,0,0,66,67,0},{0,0,0,0,0,0,77,78},{0,0,0,0,0,0,87,88}};
BCSRmatrix<int,4> bbcsr3 = {{11,12,0,0,0,0,0,0} ,{0,22,0,0,0,0,0,0} ,{31,32,33,0,0,0,0,0},
{41,42,43,44,0,0,0,0}, {0,0,0,0,55,56,0,0},{0,0,0,0,0,66,67,0},{0,0,0,0,0,0,77,78},{0,0,0,0,0,0,87,88}};
return 0;
}
Now back to the question: I obtain the 4 vectors as in the picture, but what about going back from these 4 vectors to the dense matrix?
For example, how do I print out the whole matrix?
Edit: I've figured out how to print the "block matrix" (the smaller one in the picture) with the corresponding indices into the vector AN:
template <typename T,std::size_t SZ>
inline auto constexpr BCSRmatrix<T,SZ>::printBlockMatrix() const noexcept
{
for(auto i=0 ; i < denseRows / SZ ; i++)
{
for(auto j=1 ; j <= denseCols / SZ ; j++)
{
std::cout << findBlockIndex(i,j) << ' ' ;
}
std::cout << std::endl;
}
}
template <typename T, std::size_t SZ>
auto constexpr BCSRmatrix<T,SZ>::findBlockIndex(const std::size_t r, const std::size_t c) const noexcept
{
for(auto j= ai_.at(r) ; j < ai_.at(r+1) ; j++ )
{
if( aj_.at(j-1) == c )
{
return j ;
}
}
return std::size_t(0) ; // no block stored at (r,c)
}
so that when I call this in the main:
bbcsr3.printBlockMatrix();
it gives me the right result:
1 0 0 0
2 3 0 0
0 0 4 5
0 0 0 6
Now only the whole matrix is missing; I think I've overlooked something. It should be easy, but I haven't got the point. Any ideas?
What about going back from these 4 vectors to the dense matrix? For example, how do I print out the whole matrix?
Back to the sparse matrix:
template <typename T, std::size_t SZ>
auto constexpr BCSRmatrix<T,SZ>::recomposeMatrix() const noexcept {
std::vector<std::vector<T>> sparseMat(denseRows, std::vector<T>(denseCols, 0));
auto BA_i = 0, AJ_i = 0;
//for each BCSR row
for(auto r = 0; r < denseRows/SZ; r++){
//for each Block in row
for(auto nBlock = 0; nBlock < ai_.at(r+1)-ai_.at(r); nBlock++){
//for each subMatrix (Block)
for(auto rBlock = 0; rBlock < SZ; rBlock++){
for(auto cBlock = 0; cBlock < SZ; cBlock++){
//insert value
sparseMat.at(rBlock + r*SZ).at(cBlock + (aj_.at(AJ_i)-1)*SZ) = ba_.at(BA_i);
++BA_i;
}
}
++AJ_i;
}
}
return sparseMat;
}
Where:
BA_i and AJ_i are the running indices into the respective vectors.
nBlock counts the blocks in the current row, as given by ai_.
rBlock and cBlock are the indices inside the SZ*SZ sub-matrix ("Block").
Note: an_ remains unused; you can try replacing BA_i with it.
Print the matrix:
std::vector<std::vector<int>> sparseMat = bbcsr2.recomposeMatrix();
for(auto i = 0; i < sparseMat.size(); i++){
for(auto j = 0; j < sparseMat.at(i).size(); j++)
std::cout<<sparseMat.at(i).at(j) << '\t';
std::cout << std::endl;
}
I'm not sure I wrote the template correctly, but the algorithm should work; let me know if there are problems.
EDIT
does it make sense, in a class created to save time and memory by storing a sparse matrix in a compact way, to use a vector to reconstruct the whole matrix?
You're right, my fault; I thought the problem was recomposing the matrix.
I rewrote the methods using findBlockIndex as a reference.
template <typename T, std::size_t SZ>
auto constexpr BCSRmatrix<T,SZ>::printSparseMatrix() const noexcept {
//for each BCSR row
for(auto i=0 ; i < denseRows / SZ ; i++){
//for each Block sub row.
for(auto rBlock = 0; rBlock < SZ; rBlock++){
//for each BCSR col.
for(auto j = 1; j <= denseCols / SZ; j++){
//for each Block sub col.
for(auto cBlock = 0; cBlock < SZ; cBlock++){
std::cout<< findValue(i, j, rBlock, cBlock) <<'\t';
}
}
std::cout << std::endl;
}
}
}
template <typename T, std::size_t SZ>
auto constexpr BCSRmatrix<T,SZ>::findValue(const std::size_t i, const std::size_t j, const std::size_t rBlock, const std::size_t cBlock) const noexcept {
auto index = findBlockIndex(i,j);
if(index != 0)
return ba_.at(an_.at(index-1)-1 + cBlock + rBlock*SZ);
else
return T(0);
}
I hope this helps,
best regards, Marco.
I'm implementing a matrix class in modified compressed sparse column format, and I have no idea how to perform the matrix product. This format stores all the non-zero elements in two vectors (values and indices), built as follows:
aa_ is the vector of values: its first matrix-dim elements hold the diagonal, followed by all the non-zero off-diagonal values.
ja_ holds the column pointers in its first matrix-dim+1 elements: ja_[0] = dim+2, and ja_[i+1] - ja_[i] = number of non-zero off-diagonal elements in column i; the remaining entries ja_[k] hold the row index of the k-th off-diagonal value.
If you want to read more about this format, look here: Modified compressed format.
I've implemented the class, but I would like to figure out how to perform the matrix product; I hope somebody can help me with this.
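To make the layout concrete, here is what the constructor below produces for the 4x4 matrix m100 used in the main (worked out by hand; the slot aa_[4] is the unused padding element this format leaves between the diagonal and the off-diagonal values):

m100 = 1.01  0     2.34  0
       0     4.07  0     0
       3.12  0     6.08  0
       1.06  0     2.2   9.9

aa_ : 1.01 4.07 6.08 9.9 0 3.12 1.06 2.34 2.2
ja_ : 6 8 8 10 10 3 4 1 4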
#include <iosfwd>
#include <initializer_list>
#include <iomanip>
#include <cassert>
#include <cmath>
#include <vector>
template <typename data_type> class MCSCmatrix ;
template <typename T>
std::vector<T> operator*(const MCSCmatrix<T>& A ,const std::vector<T>& x)noexcept ;
template <typename data_type>
class MCSCmatrix {
public:
template <typename T>
friend std::vector<T> operator*(const MCSCmatrix<T>& A ,const std::vector<T>& x) noexcept ;
constexpr MCSCmatrix( std::initializer_list<std::vector<data_type>> row );
auto constexpr printMCSC() const noexcept ;
private:
std::vector<data_type> aa_ ; // diagonal values, then non-zero off-diagonal values
std::vector<std::size_t> ja_ ;
std::size_t dim ;
};
template <typename T>
inline constexpr MCSCmatrix<T>::MCSCmatrix( std::initializer_list<std::vector<T>> row)
{
this->dim = row.size();
auto il = *(row.begin());
if(this-> dim != il.size())
{
throw InvalidSizeException("Matrix Must be square in Modified CSC format ");
}
std::vector<std::vector<T>> temp(row);
aa_.resize(dim+1);
ja_.resize(dim+1);
//std::size_t elemCount = 0;
ja_[0] = dim+2 ;
auto elemCount = 0;
for(auto c = 0 ; c < temp[0].size() ; c++ )
{
elemCount =0 ;
for(auto r = 0 ; r < temp.size() ; r++)
{
if(c==r)
{
aa_[c] = temp[r][c] ;
}
else if(c != r && temp[r][c] !=0)
{
aa_.push_back(temp[r][c]);
ja_.push_back(r+1);
elemCount++ ;
}
}
ja_[c+1] = ja_[c] + elemCount ;
}
printMCSC();
}
template <typename T>
inline auto constexpr MCSCmatrix<T>::printMCSC() const noexcept
{
std::cout << "aa: " ;
for(auto& x : aa_ )
std::cout << x << ' ' ;
std::cout << std::endl;
std::cout << "ja: " ;
for(auto& x : ja_ )
std::cout << x << ' ' ;
std::cout << std::endl;
}
template <typename T>
std::vector<T> operator*(const MCSCmatrix<T>& A ,const std::vector<T>& x) noexcept
{
assert(A.dim == x.size());
std::vector<T> b(x.size());
for(auto i=0 ; i < A.dim ; i++ )
b.at(i) = A.aa_.at(i) * x.at(i) ; // diagonal value
for(auto i=0; i< A.dim ; i++)
{
for(auto k=A.ja_.at(i)-1 ; k < A.ja_.at(i+1)-1 ; k++ )
{
b.at(A.ja_.at(k)-1) += A.aa_.at(k)* x.at(i);
}
}
return b;
}
And here is the main function:
#include "ModCSCmatrix.H"
using namespace std;
int main(){
MCSCmatrix<int> m1 = {{11,12,13,14,0,0},{0,22,23,0,0,0},{0,0,33,34,35,36},{0,0,0,44,45,0},
{0,0,0,0,0,56},{0,0,0,0,0,66}};
m1.printMCSC();
MCSCmatrix<double> m100 = {{1.01, 0 , 2.34,0}, {0, 4.07, 0,0},{3.12,0,6.08,0},{1.06,0,2.2,9.9} };
std::vector<double> v1={0,1.3,4.2,0.8};
std::vector<double> v2 = m100*v1 ;
for(auto& x : v2)
cout << x << ' ' ;
cout << endl;
return 0;
}
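As a quick check of operator*, computing m100*v1 by hand gives the values below: the first loop contributes the diagonal term aa_[i]*x[i] to each b[i], and the second loop scatters each off-diagonal value aa_[k] into row ja_[k]-1 of the result.

b[0] = 1.01*0 + 2.34*4.2           = 9.828
b[1] = 4.07*1.3                    = 5.291
b[2] = 3.12*0 + 6.08*4.2           = 25.536
b[3] = 1.06*0 + 2.2*4.2 + 9.9*0.8  = 17.16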
I just don't know what to do with it...
The functions run well in debug, but not in release.
I am trying to learn about artificial neural networks and C++ vectors.
Here is the code (in Python 2.7) that I'm writing in C++:
http://neuralnetworksanddeeplearning.com/chap1.html#exercise_852508
(just scroll a little to reach it)
I'm using MinGW 7.2.0 from MSYS2 (C++11).
There are some "teste" prints inside the backpropagation method, that is where the problem is comming from (I guess). I also overloaded operators +, - and * to make things easier.
I know that there are some libs like Armadillo that could make things easier, but I really wanna use this problem to learn better.
And here are the files:
neuralnetwork.h
(I made everything public to make things easier to look at)
#pragma once
#include <vector>
#include <utility>
#include <random>
#define MIN_NUMBER_TOLERANCE 1e-8
namespace nn
{
class neuralnetwork
{
//private:
public:
//total number of weights. useful to reserve memory
int numWeights;
//total number of biases. useful to reserve memory
int numBiases;
//total number of layers: 1 for input, n hidden layers and 1 for output
int numLayers;
//a vector to store the number of neurons in each layer: 0 index is about the input layer, last index is about the output layer
std::vector<int> sizes;
//stores all biases: num of neurons of layer 1 + ... + num of neurons of layer (numLayers - 1) (input layer has no bias)
std::vector<std::vector<double>> biases;
//stores all weights: (num of neurons of layer 1) x (num of neurons of layer ) + ... + ( num of neurons of layer (numLayers - 1) ) x ( num of neurons of layer (numLayers - 2) ) (input layer has no bias)
std::vector<std::vector<std::vector<double>>> weights;
//stores the output of each neuron of each layer
std::vector<std::vector<double>> layersOutput;
std::vector<std::vector<std::vector<double>>> derivativeWeights;
std::vector<std::vector<double>> derivativeBiases;
std::default_random_engine generator;
std::normal_distribution<double> distribution;
double randomNormalNumber(void);
double costDerivatives(const double&, const double&);
std::vector<double> costDerivatives(const std::vector<double> &, const std::vector<double> &);
void backPropagation(const std::vector<double>& neuralNetworkInputs, const std::vector<double>& expectedOutputs, // inputs
std::vector<std::vector<std::vector<double>>>& derivativeWeights, std::vector<std::vector<double>>& derivativeBiases); // outputs
void update_mini_batch( const std::vector<std::pair<std::vector<double>,std::vector<double>>> & mini_batch, double eta);
//public:
neuralnetwork(const std::vector<int>& sizes);
std::vector<double> feedforward(const std::vector<double>&);
};
std::vector<double> sigmoid(const std::vector<double> &);
double sigmoid(double);
std::vector<double> sigmoid_prime(const std::vector<double> &);
//double sigmoid_prime(double);
}
neuralnetwork.cpp
#include "neuralnetwork.h"
#include <iostream>
#include <assert.h>
#include <algorithm>
#include <cmath>   // exp
#include <cstdlib> // system
namespace nn
{
int counter = 0;
neuralnetwork::neuralnetwork(const std::vector<int> &sizes)
{
this->distribution = std::normal_distribution<double>( 0.0 , 1.0 );
this->numLayers = sizes.size();
this->sizes = sizes;
this->numWeights = 0;
this->numBiases = 0;
for ( int i = 1 ; i < this->numLayers ; i++ )
{
numWeights += this->sizes[ i ] * this->sizes[ i - 1 ];
numBiases += this->sizes[ i ];
}
this->weights.reserve( numWeights );
this->biases.reserve( numBiases );
this->derivativeWeights.reserve( numWeights );
this->derivativeBiases.reserve( numBiases );
this->layersOutput.reserve( this->sizes[ 0 ] + numBiases );
std::vector<double> auxVectorWeights;
std::vector<std::vector<double> > auxMatrixWeights;
std::vector<double> auxVectorBiases;
#ifdef DEBUG_BUILD
std::cout << "debugging!\n";
#endif
//just to accommodate the input layer with null biases and inputs (makes things easier to iterate and reading :D).
this->layersOutput.push_back( std::vector<double>( this->sizes[ 0 ] ) );
std::vector<std::vector<double>> matrixNothing( 0 );
this->weights.push_back( matrixNothing );
this->biases.push_back( std::vector<double>( 0 ) );
//start from the second layer (index 1) because there are no weights (nor biases) for the neurons of the first layer
for ( int layer = 1 ; layer < this->numLayers ; layer++ )
{
//preallocate memory for the output of each layer.
layersOutput.push_back( std::vector<double>( this->sizes[ layer ] ) );
//-----------weights begin--------------
//auxMatrixWeights will store the weights connections between one layer (number of columns) and its subsequent layer (number of rows)
//auxMatrixWeights = new std::vector(this->sizes[layer], std::vector<double>( this->sizes[layer - 1] )); // it is not working...
//size[layer] stores the number of neurons on the layer
for ( int i = 0 ; i < this->sizes[ layer ] ; i++ )
{
//auxVectorWeights will have the size of the amount of wights necessary to connect the neuron i (from this layer) to neuron j (from next layer)
auxVectorWeights = std::vector<double>( this->sizes[ layer - 1 ] );
for ( int j = 0 ; j < auxVectorWeights.size() ; j++ )
{
auxVectorWeights[ j ] = this->randomNormalNumber();
}
auxMatrixWeights.push_back( auxVectorWeights );
}
this->weights.push_back( auxMatrixWeights );
auxMatrixWeights.clear();
//-----------weights end----------------
//-----------biases begin---------------
auxVectorBiases = std::vector<double>( this->sizes[ layer ] );
for ( int i = 0 ; i < auxVectorBiases.size() ; i++ )
{
auxVectorBiases[ i ] = this->randomNormalNumber();
}
this->biases.push_back( auxVectorBiases );
//-----------biases end-----------------
}
#ifdef _DEBUG
for ( int i = 0 ; i < this->weights.size() ; i++ )
{
std::cout << "layer " << i << "\n";
for ( int j = 0 ; j < this->weights[ i ].size() ; j++ )
{
std::cout << "neuron" << j << std::endl;
for ( const auto k : this->weights[ i ][ j ] )
{
std::cout << '\t' << k << ' ';
}
std::cout << std::endl;
}
}
#endif
}
template <class T>
inline int lastIndex(std::vector<T> vector , int tail)
{
return (vector.size() - tail);
}
double neuralnetwork::randomNormalNumber(void)
{
return this->distribution( this->generator );
}
double sigmoid(double z)
{
return 1.0 / ( 1.0 + exp( -z ) );
}
std::vector<double> sigmoid(const std::vector<double> & z)
{
int max = z.size();
std::vector<double> output;
output.reserve(max);
for(int i=0;i<max;i++)
{
output.push_back(0);
output[i] = 1.0 / ( 1.0 + exp( -z[i] ) );
}
return output;
}
/*double sigmoid_prime(double z)
{
return sigmoid( z ) * ( 1 - sigmoid( z ) );
}*/
std::vector<double> sigmoid_prime(const std::vector<double>& z)
{
int max = z.size();
std::vector<double> output;
output.reserve(max);
for(int i=0;i<max;i++)
{
output.push_back(sigmoid( z[i] ) * ( 1 - sigmoid( z[i] ) ) );
}
return output;
}
//scalar times vector
std::vector<double> operator* (double a , const std::vector<double> & b)
{
int size = b.size();
std::vector<double> result(size);
for ( int i = 0 ; i < size ; i++ )
{
result[i] = a * b[ i ];
}
return result;
}
// inner product
std::vector<double> operator* (const std::vector<double> & a , const std::vector<double> & b)
{
#ifdef _DEBUG
assert(a.size() == b.size());
#endif
int size = a.size(); // or b.size(). they should have the same size.
std::vector<double> result;
result.reserve(size); // or b.size(). they should have the same size.
for ( int i = 0 ; i < size ; i++ )
{
result.push_back( a[ i ] * b[ i ] );
}
return result;
}
//matrix times columns vector
std::vector<double> operator* (const std::vector<std::vector<double>> & a , const std::vector<double> & b)
{
#ifdef _DEBUG
assert(a[0].size() == b.size());
for(int i = 0 ; i < ( lastIndex( a , 1 )) ; i++)
{
assert(a[i].size() == a[i+1].size());
}
#endif
int lines = a.size();
int columns = a[0].size();
std::vector<double> result;
result.reserve(lines);
int j = 0;
for ( int i = 0 ; i < lines ; i++ )
{
result.push_back(0);
for(j = 0 ; j < columns ; j++)
{
result[i] += a[ i ][ j ] * b[ j ];
}
}
return result;
}
//scalar times matrix (calls scalar times vector)
std::vector<std::vector<double>> operator* (double a , const std::vector<std::vector<double>> & b)
{
#ifdef _DEBUG
for(int i = 0 ; i < b.size()-1 ; i++)
{
assert(b[i].size() == b[i+1].size());
}
#endif
int lines = b.size();
int columns = b[0].size();
std::vector<std::vector<double>> result;
int j = 0;
for ( int i = 0 ; i < lines ; i++ )
{
result.push_back(a * b[ j ]);
}
return result;
}
std::vector<double> operator+(const std::vector<double>& a, const std::vector<double>& b)
{
assert(a.size() == b.size());
int size = a.size();
std::vector<double> result;
result.reserve(size);
for(int i = 0 ; i < size ; i++)
{
result.push_back(0);
result[i] = a[i] + b[i];
}
return result;
}
//sum of matrices
std::vector<std::vector<double>> operator+(const std::vector<std::vector<double>>& a, const std::vector<std::vector<double>>& b)
{
#ifdef _DEBUG
assert(a.size() == b.size());
#endif
int size = a.size();
#ifdef _DEBUG
for(int i = 0 ; i < size ; i++)
{
assert(a[i].size() == b[i].size());
}
#endif
std::vector<std::vector<double>> result;
result.resize(size);
for(int i = 0 ; i < size ; i++)
{
result.push_back(a[i] + b[i]);
}
return result;
}
//subtraction of vectors
std::vector<double> operator-(const std::vector<double>& a, const std::vector<double>& b)
{
#ifdef _DEBUG
assert(a.size() == b.size());
#endif
int size = a.size();
std::vector<double> result;
result.resize(size);
for(int i = 0 ; i < size ; i++)
{
result[i] = a[i] - b[i];
}
return result;
}
//subtraction of matrices (calls subtraction of vectors)
std::vector<std::vector<double>> operator-(const std::vector<std::vector<double>>& a, const std::vector<std::vector<double>>& b)
{
#ifdef _DEBUG
assert(a.size() == b.size());
#endif
int size = a.size();
#ifdef _DEBUG
for(int i = 0 ; i < size ; i++)
{
assert(a[i].size() == b[i].size());
}
#endif
std::vector<std::vector<double>> result;
result.resize(size);
for(int i = 0 ; i < size ; i++)
{
result.push_back(a[i] - b[i]);
}
return result;
}
//elementwise division
std::vector<double> operator/(const std::vector<double>& a, const std::vector<double>& b)
{
assert(a.size() == b.size());
int size = a.size();
std::vector<double> result;
result.reserve(size);
for(int i = 0 ; i < size ; i++)
{
if(b[i] < MIN_NUMBER_TOLERANCE)
{
throw std::runtime_error("Can't divide by zero!");
}
result[i] = a[i] / b[i];
}
return result;
}
double neuralnetwork::costDerivatives(const double &networkOutput , const double &expectedOutput)
{
return expectedOutput - networkOutput;
}
std::vector<double> neuralnetwork::costDerivatives(const std::vector<double> &networkOutput , const std::vector<double> &expectedOutput)
{
assert(expectedOutput.size() == networkOutput.size());
int size = networkOutput.size();
std::vector<double> output;
output.reserve(size);
for(int i = 0 ; i < size ; i++)
{
output.push_back(networkOutput[i] - expectedOutput[i]);
}
return output;
}
void neuralnetwork::backPropagation(const std::vector<double> &neuralNetworkInputs , const std::vector<double> &expectedOutputs, // inputs
std::vector<std::vector<std::vector<double>>>& derivativeWeights , std::vector<std::vector<double>>& derivativeBiases) // outputs
{
std::cout << "teste "<< counter++ << std::endl;
system("PAUSE");
derivativeWeights.reserve( sizes.size() - 1 );
derivativeBiases.reserve( sizes.size() - 1 );
//to store one activation layer
std::vector<double> activation = neuralNetworkInputs;
//to store each one of the activation layers
std::vector<std::vector<double>> activations;
activations.reserve(sizes.size()); // numBiases is the same as the number of neurons (except 1st layer)
activations.push_back(activation);
int maxLayerSize = 0;
std::cout << "teste "<< counter++ << std::endl;
system("PAUSE");
for ( int i = 1 ; i < numBiases ; i++ )
{
maxLayerSize = std::max(sizes[i], maxLayerSize);
}
std::cout << "teste "<< counter++ << std::endl;
system("PAUSE");
// to store one weighted sum
std::vector<double> z;
z.reserve(maxLayerSize);
// to store each one of the weighted sums
std::vector<std::vector<double>> zs;
zs.reserve(sizes.size());
// layer and neuron counter
int layer, neuron;
for ( layer = 1 ; layer < numLayers ; layer++ )
{
z = (weights[layer] * activation) + biases[layer];
zs.push_back(z);
activation = sigmoid(z);
activations.push_back(activation);
}
std::cout << "teste "<< counter++ << std::endl;
system("PAUSE");
std::vector<double> delta = costDerivatives(activations[ lastIndex( activations , 1 )] , expectedOutputs) * sigmoid_prime(z);
delta.reserve(maxLayerSize);
derivativeBiases.push_back(delta);
int j;
std::vector<std::vector<double>> dummyMatrix;
dummyMatrix.reserve(maxLayerSize);
for (neuron = 0; neuron < sizes[ lastIndex( sizes , 1 )]; neuron++)
{
dummyMatrix.push_back(std::vector<double>(activations[ lastIndex( activations , 2 )].size()));
for (j = 0; j < activations[ lastIndex( activations , 2 )].size(); j++)
{
dummyMatrix[neuron][j] = delta[neuron] * activations[ lastIndex( activations , 2 )][j];
}
}
std::cout << "teste "<< counter++ << std::endl;
system("PAUSE");
derivativeWeights.push_back(dummyMatrix);
dummyMatrix.clear();
std::vector<double> sp;
sp.reserve(maxLayerSize);
std::vector<double> dummyVector;
dummyVector.reserve(maxLayerSize);
double dummyDouble = 0;
for(layer = 2 ; layer < numLayers ; layer++)
{
z = zs[ lastIndex( zs , layer )];
sp = sigmoid_prime(z);
for(j = 0 ; j < sizes[ lastIndex( weights , layer )] ; j++)
{
for (neuron = 0; neuron < sizes[ lastIndex( sizes , layer - 1 )]; neuron++)
{
dummyDouble += weights[ lastIndex( weights , layer - 1 )][neuron][j] * delta[neuron];
}
dummyVector.push_back(dummyDouble * sp[j]);
dummyDouble = 0;
}
delta = dummyVector;
dummyVector.clear();
derivativeBiases.push_back(delta);
for (neuron = 0; neuron < sizes[ lastIndex( sizes , layer )]; neuron++)
{
dummyMatrix.push_back(std::vector<double>(sizes[ lastIndex( sizes , layer + 1 )]));
for (j = 0; j < sizes[ lastIndex( sizes , layer + 1 )]; j++)
{
dummyMatrix[neuron][j] = activations[ lastIndex( activations , layer + 1 )][j] * delta[neuron];
}
}
derivativeWeights.push_back(dummyMatrix);
dummyMatrix.clear();
}
std::cout << "teste "<< counter++ << std::endl;
system("PAUSE");
//both derivativeWeights and derivativeBiases are reversed. so let's reverse it.
std::reverse(derivativeWeights.begin(),derivativeWeights.end());
std::reverse(derivativeBiases.begin(),derivativeBiases.end());
std::cout << "teste "<< counter++ << std::endl;
system("PAUSE");
}
}
main.cpp
#include <stdio.h>
#include <iostream>
#include <opencv2/opencv.hpp>
#include "neuralnetwork.h"
#include <string>
void printAll(const std::vector<double> & v, const std::string & name)
{
int size = v.size();
std::cout << "\t" << name << ":\t";
for(int i = 0 ; i < size ; i++)
{
std::cout << v[i] << "\t";
}
std::cout << std::endl;
}
template<class T>
void printAll(const std::vector<T> & v, const std::string & name)
{
int size = v.size();
std::cout << name << ":" << std::endl;
for(int i = 0 ; i < size ; i++)
{
printAll(v[i], "\t" + ("[" + std::to_string(i)) + "]");
}
}
int main(int argc, char** argv )
{
nn::neuralnetwork n({2,4,3});
n.weights = {{},{{1,2},{3,4},{5,6},{7,8}} , {{9,8,7,6},{5,4,3,2},{1,2,3,4}}};
n.biases = {{},{1, 4, 6, 8} , {9, 2, 4}};
printAll(n.weights,"weights");
printAll(n.biases,"biases");
std::vector<std::vector<std::vector<double>>> derivativeWeights;
std::vector<std::vector<double>> derivativeBiases;
n.backPropagation({1,2},{1,2,3},derivativeWeights,derivativeBiases);
printAll(n.derivativeWeights,"derivativeWeights");
printAll(n.derivativeBiases,"derivativeBiases");
system("PAUSE");
return 0;
}
It looks like your problem is that you are only reserving memory for the vectors in the constructor, not allocating it.
The reserve method does not resize the vector; it only changes its capacity, as a performance optimization for cases where you know you will grow the vector in the future. Accessing elements that were reserved but never added is undefined behaviour.
This isn't causing a problem for 'weights' and 'biases' in this particular code because you are initializing them with vectors of the proper size, which does set them to the correct size. The problems are with derivativeWeights and derivativeBiases, where you reserve memory for the vectors, but they are never actually resized. This makes that memory potentially invalid if you try to dereference it. You could use resize instead of reserve, or push back the elements one by one, which will also resize the vector. A minimal sketch of the difference follows.
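Here is a minimal standalone example of the difference (hypothetical, not taken from the code above):

#include <iostream>
#include <vector>

int main()
{
    std::vector<double> v;
    v.reserve(3);                  // capacity() >= 3, but size() is still 0
    std::cout << v.size() << '\n'; // prints 0; reading v[0] here would be undefined behaviour

    v.resize(3);                   // size() == 3, elements value-initialized to 0.0
    v[0] = 1.5;                    // now this access is valid
    std::cout << v.size() << '\n'; // prints 3
    return 0;
}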
Another comment: you don't have to use this-> for every member of the class; this-> is assumed for members if you don't write it.
I didn't find the problem, but I realised that, for this problem (an artificial neural network), I can initialize each member of the class with the right size without loss of generality. So this is what I'm going to do.
I feel a little ashamed that I can't find it... :/
#include <iostream>
#include <vector>
#include <list>
using namespace std ;
class Graph
{
public:
Graph(int V)
{
this->V = V ;
G.resize(V) ;
}
void addEdge(int v , int w)
{
G[v].push_back(w) ;
G[w].push_back(v) ;
}
void DFS( int s )
{
bool *visited = new bool[this->V] ;
for( int i = 0 ; i < this->V ; i++ )
visited[i] = false ;
int *arrival = new int[this->V] ;
int *departure = new int[this->V] ;
static int t = 0 ;
DFSUtil(s,visited,arrival,departure,t) ; // Utility function to do the DFS
cout << "\n" ;
for( int i = 0 ; i < this->V ; i++ )
cout << arrival[i] << "/" << departure[i] << " " ;
}
void printGraph()
{
vector< list<int> >::iterator i ;
list<int>::iterator j ;
int k = 0 ;
int t = 0 ;
for( i = G.begin() ; i != G.end() ; ++i )
{
cout << "Node " << k++ << "->" ;
for( j = G[t].begin() ; j != G[t].end() ; ++j )
cout << *j << "->" ;
cout << "NULL\n" ;
t++ ;
}
}
private:
int V ; // number of vertices
vector< list<int> > G ; // array of lists
void DFSUtil( int s , bool *visited , int *arrival , int *departure , int t ) // Utility function to do DFS
{
cout << s << " " ;
visited[s] = true ;
arrival[s] = ++t ;
list<int>::iterator i ;
for( i = G[s].begin() ; i != G[s].end() ; ++i )
{
if( !visited[*i] )
DFSUtil(*i,visited,arrival,departure,t) ;
}
departure[s] = ++t ;
}
};
int main()
{
// Create a graph given in the above diagram
Graph g(6);
g.addEdge(0, 1);
g.addEdge(0, 2);
g.addEdge(1, 2);
g.addEdge(0, 4);
g.addEdge(0, 3);
g.addEdge(1, 4);
g.addEdge(1, 5);
g.addEdge(4, 5);
g.addEdge(3, 5);
g.printGraph() ;
cout << "Following is Depth First Traversal (starting from vertex 0) \n";
g.DFS(0);
return 0;
}
I wanted to timestamp the process of DFS, i.e. record when the search reaches a particular node and when it backtracks from it. What I mean is that when DFS begins it first visits node 0, so arrival[0] = 1; then it recursively calls on node 1, so arrival[1] = 2; then it recursively calls on node 2, so arrival[2] = 3. Now node 2 can't call on anyone, so departure[2] should be 4, and so on. But what my program outputs is not in line with what I was expecting. I thought declaring the timestamp variable t as static would do the trick, but it's not working. How do I correct it?
"static" in this context essentially means "there is only one of these that is shared by all invokations of this function".
You're still passing a copy of the variable as a parameter to the other functions.
If you want a called function to be able to modify a variable, you pass it a reference to that variable.
Remove the "static", and change DFSUtil to
void DFSUtil(int s, bool *visited, int *arrival, int *departure, int &t)
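Put together, the fixed functions would look roughly like this (a sketch based on the code above; the only changes are the reference parameter, the non-static counter, and freeing the arrays):

void DFS( int s )
{
    bool *visited = new bool[this->V] ;
    for( int i = 0 ; i < this->V ; i++ )
        visited[i] = false ;
    int *arrival = new int[this->V] ;
    int *departure = new int[this->V] ;
    int t = 0 ;  // plain local counter, passed by reference below
    DFSUtil(s, visited, arrival, departure, t) ;
    cout << "\n" ;
    for( int i = 0 ; i < this->V ; i++ )
        cout << arrival[i] << "/" << departure[i] << " " ;
    delete[] visited ; delete[] arrival ; delete[] departure ;
}

void DFSUtil( int s , bool *visited , int *arrival , int *departure , int &t )
{
    cout << s << " " ;
    visited[s] = true ;
    arrival[s] = ++t ;  // every recursive call now advances the same t
    for( list<int>::iterator i = G[s].begin() ; i != G[s].end() ; ++i )
    {
        if( !visited[*i] )
            DFSUtil(*i, visited, arrival, departure, t) ;
    }
    departure[s] = ++t ;
}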