I need to transfer an array of tiles from C++ to AngelScript. I have tried adding a function that returns an std::vector, but registering it fails with this error:
Failed in call to function 'RegisterGlobalFunction' with 'array<DumbTile> GetTilesAt(int x, int y)' (Code: asINVALID_DECLARATION, -10)
my code:
std::vector<DumbTile> GetTilesAt(int x, int y) {
    std::vector<DumbTile> output;
    for (DumbTile t : tiles) {
        if (t.x == x && t.y == y) {
            output.push_back(t);
        }
    }
    return output;
}
engine->RegisterGlobalFunction("array<DumbTile> GetTilesAt(int x, int y)", asFUNCTIONPR(GetTilesAt, (int, int), std::vector<DumbTile>), asCALL_CDECL);
Is DumbTile registered before GetTilesAt() registration?
Is array<T> registered before GetTilesAt() registration?
Both need to be registered before you can register your function.
Are you using the stock array implementation (sdk/add_on/scriptarray/) or your own std::vector<>-based implementation? When using the stock add-on, the application must convert the std::vector to a CScriptArray first, as AngelScript can't really do that on its own due to the way arrays work.
There is an obvious alternative: switch from std::vector to CScriptArray everywhere you want scripts to access data, but this might be annoying.
Example std::vector<Object*> <-> CScriptArray* conversion
template<typename Type>
void AppendVectorToArrayRef( vector<Type>& vec, CScriptArray* arr )
{
    if( !vec.empty() && arr )
    {
        asUINT i = arr->GetSize();
        arr->Resize( (asUINT)( i + vec.size() ) );
        for( asUINT k = 0, l = (asUINT)vec.size(); k < l; k++, i++ )
        {
            Type* p = (Type*)arr->At( i );
            *p = vec[k];
            (*p)->AddRef();
        }
    }
}
template<typename Type>
void AssignScriptArrayInVector( vector<Type>& vec, CScriptArray* arr )
{
    if( arr )
    {
        asUINT count = arr->GetSize();
        if( count )
        {
            vec.resize( count );
            for( asUINT i = 0; i < count; i++ )
            {
                Type* p = (Type*)arr->At( i );
                vec[i] = *p;
            }
        }
    }
}
The code is a bit old, but I think it should still work, even if it begs for a refresh.
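For the stock add-on, a minimal sketch of a wrapper that does the conversion for the function in the question (assuming DumbTile is registered as a value type, and that your AngelScript version provides asIScriptEngine::GetTypeInfoByDecl and CScriptArray::Create/SetValue; older versions use asIObjectType instead of asITypeInfo):
CScriptArray* GetTilesAtScript(int x, int y)
{
    std::vector<DumbTile> found = GetTilesAt(x, y);
    asITypeInfo* arrayType = engine->GetTypeInfoByDecl("array<DumbTile>");
    CScriptArray* arr = CScriptArray::Create(arrayType, (asUINT)found.size());
    for (asUINT i = 0; i < (asUINT)found.size(); i++)
        arr->SetValue(i, &found[i]);   // copies each DumbTile into the script array
    return arr;
}
// registered with a matching declaration, e.g.:
// engine->RegisterGlobalFunction("array<DumbTile>@ GetTilesAt(int x, int y)",
//     asFUNCTION(GetTilesAtScript), asCALL_CDECL);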
I'm trying to make a light-weight layer on top of a contiguous array of arbitrary structs (let's call it DataItem), which will handle common operations like file I/O, rendering on screen/GUI (like an Excel table), searching and sorting by different properties, etc.
But I want to make my class Table and the user-defined struct/class DataItem completely independent of each other (i.e. both can compile without knowing each other's header file .h). I think it cannot be like template<class T> class Table{ std::vector<T> data;}; because then the user would be obliged to implement functionality like DataItem::toString(int icolumn), and I don't want to put that constraint on the DataItem struct.
My current implementation relies on pointer arithmetic and a switch, and can handle only a few types of data members (bool, int, float, double). I wonder if, e.g. using templates, this could be improved (made more generic, safer, etc.) without considerably increasing complexity and performance cost.
I want to use it like this:
#include "Table.h"
#include "GUI.h"
#include "Vec3d.h"
// example of user defined DataItem struct
struct TestStruct{
int inum = 115;
double dnum = 11.1154546;
double fvoid= 0.0;
float fnum = 11.115;
Vec3d dvec = { 1.1545, 2.166, 3.1545 };
};
int main(){
// ==== Initialize test data
Table* tab1 = new Table();
tab1->n = 120;
TestStruct* tab_data = new TestStruct[tab1->n];
for(int i=0; i<tab1->n; i++){
tab_data[i].inum = i;
tab_data[i].fnum = i*0.1;
tab_data[i].dnum = i*0.01;
}
// ==== Bind selected properties/members of TestStruct as columns in the table
tab1->bind(tab_data, sizeof(*tab_data) );
// This is actually quite complicated =>
// I would be happy if it could be automated by some template magic ;-)
tab1->addColum( &(tab_data->inum), 1, DataType::Int );
tab1->addColum( &(tab_data->fnum), 1, DataType::Float );
tab1->addColum( &(tab_data->dnum), 1, DataType::Double );
tab1->addColum( &(tab_data->dvec), 3, DataType::Double );
// ==== Visualize the Table in the GUI
gui.addPanel( new TableView( tab1, "tab1", 150.0, 250.0, 0, 0, 5, 3 ) );
gui.run();
}
My current implementation looks like this:
enum class DataType{ Bool, Int, Float, Double, String };
struct Atribute{
int offset; // offset of data member from address of struct instance [bytes]
int nsub; // number of sub units. e.g. 3 for Vec3
DataType type; // type for conversion
Atribute() = default;
Atribute(int offset_,int nsub_,DataType type_):offset(offset_),nsub(nsub_),type(type_){};
};
class Table{ public:
int n; // number of items/lines in table
int itemsize = 0; // number of bytes per item
char* data = 0; // pointer to data buffer with structs; type is erased to make it generic
std::unordered_map<std::string,int> name2column;
std::vector <Atribute> columns;
void bind(void* data_, int itemsize_){
data=(char*)data_;
itemsize=itemsize_;
}
int addColum(void* ptr, int nsub, DataType type){
// determine offset of address of given data-member with respect to address of enclosing struct
int offset = ((char*)ptr)-((char*)data);
columns.push_back( Atribute( offset, nsub, type ) );
return columns.size()-1;
}
char* toStr(int i, int j, char* s){
const Atribute& kind = columns[j];
void* off = data+itemsize*i+kind.offset; // address of j-th member of i-th instance in data array
// I don't like this switch,
// but still it seems simpler and more efficient than alternative solutions using
// templates/lambda functions or function pointers
switch(kind.type){
case DataType::Bool :{ bool* arr=(bool *)off; for(int i=0; i<kind.nsub; i++){ s+=sprintf(s,"%c ", arr[i]?'T':'F' ); }} break;
case DataType::Int :{ int* arr=(int *)off; for(int i=0; i<kind.nsub; i++){ s+=sprintf(s,"%i ", arr[i] ); }} break;
case DataType::Float :{ float* arr=(float *)off; for(int i=0; i<kind.nsub; i++){ s+=sprintf(s,"%g ", arr[i] ); }} break;
case DataType::Double :{ double* arr=(double*)off; for(int i=0; i<kind.nsub; i++){ s+=sprintf(s,"%g ", arr[i] ); }} break;
case DataType::String :{ char* arr=(char *)off; for(int i=0; i<kind.nsub; i++){ s+=sprintf(s,"%s ", arr[i] ); }} break;
}
return s;
}
};
// .... Omitted most of the TableView GUI ....
void TableView::render(){
Draw ::setRGB( textColor );
char stmp[1024];
for(int i=i0; i<imax;i++){
int ch0 = 0;
for(int j=j0; j<jmax;j++){
int nch = table->toStr(i,j,stmp)-stmp; // HERE!!! I call Table::toStr()
Draw2D::drawText( stmp, nch, {xmin+ch0*fontSizeDef, ymax-(i-i0+1)*fontSizeDef*2}, 0.0, GUI_fontTex, fontSizeDef );
ch0+=nchs[j];
}
}
}
One way of solving this type of problem is by providing a "traits" class which tells one class how to deal with another class without having to modify the second class. This pattern is used extensively in the standard library.
Your code could be written as:
#include <iostream>
#include <string>
#include <vector>
#include <array>
template <typename T>
struct TableTraits;
template <typename T>
class Table
{
public:
void setData( const std::vector<T>& value )
{
data = value;
}
std::string toString( size_t row, size_t column )
{
return TableTraits<T>::toString( data[ row ], column );
}
void print()
{
for ( size_t row = 0; row < data.size(); row++ )
{
for ( size_t column = 0; column < TableTraits<T>::columns; column++ )
{
std::cout << toString( row, column ) << ", ";
}
std::cout << "\n";
}
}
private:
std::vector<T> data;
};
struct TestStruct
{
int inum = 115;
double dnum = 11.1154546;
double fvoid = 0.0;
float fnum = 11.115f;
std::array<double, 3> dvec = { 1.1545, 2.166, 3.1545 };
};
template <typename T>
std::string stringConvert( const T& value )
{
return std::to_string( value );
}
template <typename T, size_t N>
std::string stringConvert( const std::array<T, N>& value )
{
std::string result;
for ( auto& v : value )
{
result += stringConvert( v ) + "; ";
}
return result;
}
template <>
struct TableTraits<TestStruct>
{
static const size_t columns = 5;
static std::string toString( const TestStruct& row, size_t column )
{
switch ( column )
{
case 0:
return stringConvert( row.inum );
case 1:
return stringConvert( row.dnum );
case 2:
return stringConvert( row.fvoid );
case 3:
return stringConvert( row.fnum );
case 4:
return stringConvert( row.dvec );
default:
throw std::invalid_argument( "column out of range" );
}
}
};
int main()
{
std::vector<TestStruct> data( 10 );
Table<TestStruct> table;
table.setData( data );
table.print();
}
The exact details of the traits class could be different if this example doesn't exactly meet your needs.
You might also find it useful to have the traits methods and constants non-static so that you can pass a traits object into your table to allow customisation per table instance.
You might also want to allow use of a custom traits class in your table, something like this:
template <typename T, typename Traits = TableTraits<T>>
class Table
{
...
std::string toString( size_t row, size_t column )
{
return Traits::toString( data[ row ], column );
}
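For instance, a hypothetical traits class supplied per instantiation (WideTraits is an illustrative name, not part of the example above) could reuse the stringConvert helpers and expose only two columns:
struct WideTraits
{
    static const size_t columns = 2;
    static std::string toString( const TestStruct& row, size_t column )
    {
        return column == 0 ? stringConvert( row.inum )
                           : stringConvert( row.dvec );
    }
};
Table<TestStruct, WideTraits> narrowTable; // overrides the default TableTraits<TestStruct>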
It looks to me like you are trying to get dynamic (run-time) polymorphism using C-like structures and C-style polymorphism in C++. Templates are useful for static polymorphism. The proper direction is to use OOP (especially class polymorphism), defining concepts as classes: table, cell, column, row, cell-value, etc. As examples, you can check these on GitHub:
OpenXLSX
Spreadsheets-Lower
I played a bit with the experimental device lambdas that were introduced in CUDA 7.5 and promoted in this blog post by Mark Harris.
For the following example I removed a lot of stuff that is not needed to show my problem (my actual implementation looks a bit nicer...).
I tried to write a foreach function that operates on vectors either on the device (one thread per element) or on the host (serially), depending on a template parameter. With this foreach function I can easily implement BLAS functions. As an example I use assigning a scalar to each component of a vector (I attach the complete code at the end):
template<bool onDevice> void assignScalar( size_t size, double* vector, double a )
{
auto assign = [=] __host__ __device__ ( size_t index ) { vector[index] = a; };
if( onDevice )
{
foreachDevice( size, assign );
}
else
{
foreachHost( size, assign );
}
}
However, this code gives a compiler error because of the __host__ __device__ lambda:
The closure type for a lambda ("lambda ->void") cannot be used in the template argument type of a __global__ function template instantiation, unless the lambda is defined within a __device__ or __global__ function
I get the same error if I remove the __device__ from the lambda expression, and I get no compile error if I remove __host__ (leaving only a __device__ lambda), but in that case the host part is not executed...
If I define the lambda as either __host__ or __device__ separately, the code compiles and works as expected.
template<bool onDevice> void assignScalar2( size_t size, double* vector, double a )
{
if( onDevice )
{
auto assign = [=] __device__ ( size_t index ) { vector[index] = a; };
foreachDevice( size, assign );
}
else
{
auto assign = [=] __host__ ( size_t index ) { vector[index] = a; };
foreachHost( size, assign );
}
}
However, this introduces code duplication and actually makes the whole idea of using lambdas useless for this example.
Is there a way to accomplish what I want to do or is this a bug in the experimental feature? Actually, defining a __host__ __device__ lambda is explicitly mentioned in the first example in the programming guide. Even for that simpler example (just return a constant value from the lambda) I couldn't find a way to use the lambda expression on both host and device.
Here is the full code, compile with options -std=c++11 --expt-extended-lambda:
#include <iostream>
using namespace std;
template<typename Operation> void foreachHost( size_t size, Operation o )
{
for( size_t i = 0; i < size; ++i )
{
o( i );
}
}
template<typename Operation> __global__ void kernel_foreach( Operation o )
{
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
o( index );
}
template<typename Operation> void foreachDevice( size_t size, Operation o )
{
size_t blocksize = 32;
size_t gridsize = size/32;
kernel_foreach<<<gridsize,blocksize>>>( o );
}
__global__ void printFirstElementOnDevice( double* vector )
{
printf( "dVector[0] = %f\n", vector[0] );
}
void assignScalarHost( size_t size, double* vector, double a )
{
auto assign = [=] ( size_t index ) { vector[index] = a; };
foreachHost( size, assign );
}
void assignScalarDevice( size_t size, double* vector, double a )
{
auto assign = [=] __device__ ( size_t index ) { vector[index] = a; };
foreachDevice( size, assign );
}
// compile error:
template<bool onDevice> void assignScalar( size_t size, double* vector, double a )
{
auto assign = [=] __host__ __device__ ( size_t index ) { vector[index] = a; };
if( onDevice )
{
foreachDevice( size, assign );
}
else
{
foreachHost( size, assign );
}
}
// works:
template<bool onDevice> void assignScalar2( size_t size, double* vector, double a )
{
if( onDevice )
{
auto assign = [=] __device__ ( size_t index ) { vector[index] = a; };
foreachDevice( size, assign );
}
else
{
auto assign = [=] __host__ ( size_t index ) { vector[index] = a; };
foreachHost( size, assign );
}
}
int main()
{
size_t SIZE = 32;
double* hVector = new double[SIZE];
double* dVector;
cudaMalloc( &dVector, SIZE*sizeof(double) );
// clear memory
for( size_t i = 0; i < SIZE; ++i )
{
hVector[i] = 0;
}
cudaMemcpy( dVector, hVector, SIZE*sizeof(double), cudaMemcpyHostToDevice );
assignScalarHost( SIZE, hVector, 1.0 );
cout << "hVector[0] = " << hVector[0] << endl;
assignScalarDevice( SIZE, dVector, 2.0 );
printFirstElementOnDevice<<<1,1>>>( dVector );
cudaDeviceSynchronize();
assignScalar2<false>( SIZE, hVector, 3.0 );
cout << "hVector[0] = " << hVector[0] << endl;
assignScalar2<true>( SIZE, dVector, 4.0 );
printFirstElementOnDevice<<<1,1>>>( dVector );
cudaDeviceSynchronize();
// assignScalar<false>( SIZE, hVector, 5.0 );
// cout << "hVector[0] = " << hVector[0] << endl;
//
// assignScalar<true>( SIZE, dVector, 6.0 );
// printFirstElementOnDevice<<<1,1>>>( dVector );
// cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if(error!=cudaSuccess)
{
cout << "ERROR: " << cudaGetErrorString(error);
}
}
I used the production release of CUDA 7.5.
Update
I tried this third version for the assignScalar function:
template<bool onDevice> void assignScalar3( size_t size, double* vector, double a )
{
#ifdef __CUDA_ARCH__
#define LAMBDA_HOST_DEVICE __device__
#else
#define LAMBDA_HOST_DEVICE __host__
#endif
auto assign = [=] LAMBDA_HOST_DEVICE ( size_t index ) { vector[index] = a; };
if( onDevice )
{
foreachDevice( size, assign );
}
else
{
foreachHost( size, assign );
}
}
It compiles and runs without error, but the device version (assignScalar3<true>) is not executed. Actually, I thought that __CUDA_ARCH__ would always be undefined (since the function is not __device__), but I checked explicitly that there is a compile path where it is defined.
The task that I tried to accomplish with the examples provided in the question is not possible with CUDA 7.5, though it was not explicitly excluded from the allowed cases for the experimental lambda support.
NVIDIA announced that CUDA Toolkit 8.0 will support __host__ __device__ lambdas as an experimental feature, according to the blog post CUDA 8 Features Revealed.
I verified that my example works with the CUDA 8 Release Candidate (Cuda compilation tools, release 8.0, V8.0.26).
Here is the code that I finally used, compiled with nvcc -std=c++11 --expt-extended-lambda:
#include <iostream>
using namespace std;
template<typename Operation> __global__ void kernel_foreach( Operation o )
{
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
o( i );
}
template<bool onDevice, typename Operation> void foreach( size_t size, Operation o )
{
if( onDevice )
{
size_t blocksize = 32;
size_t gridsize = size/32;
kernel_foreach<<<gridsize,blocksize>>>( o );
}
else
{
for( size_t i = 0; i < size; ++i )
{
o( i );
}
}
}
__global__ void printFirstElementOnDevice( double* vector )
{
printf( "dVector[0] = %f\n", vector[0] );
}
template<bool onDevice> void assignScalar( size_t size, double* vector, double a )
{
auto assign = [=] __host__ __device__ ( size_t i ) { vector[i] = a; };
foreach<onDevice>( size, assign );
}
int main()
{
size_t SIZE = 32;
double* hVector = new double[SIZE];
double* dVector;
cudaMalloc( &dVector, SIZE*sizeof(double) );
// clear memory
for( size_t i = 0; i < SIZE; ++i )
{
hVector[i] = 0;
}
cudaMemcpy( dVector, hVector, SIZE*sizeof(double), cudaMemcpyHostToDevice );
assignScalar<false>( SIZE, hVector, 3.0 );
cout << "hVector[0] = " << hVector[0] << endl;
assignScalar<true>( SIZE, dVector, 4.0 );
printFirstElementOnDevice<<<1,1>>>( dVector );
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if(error!=cudaSuccess)
{
cout << "ERROR: " << cudaGetErrorString(error);
}
}
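One thing to note about the launch configuration: kernel_foreach is launched with size/32 blocks and no bounds check, which only covers sizes that are exact multiples of the block size (true for SIZE = 32 here). A minimal sketch of a guarded variant (the extra size parameter is an addition, not part of the code above):
template<typename Operation> __global__ void kernel_foreach_guarded( size_t size, Operation o )
{
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if( i < size )   // skip the excess threads of the last block
    {
        o( i );
    }
}
// launched with a rounded-up grid:
// size_t gridsize = ( size + blocksize - 1 ) / blocksize;
// kernel_foreach_guarded<<<gridsize,blocksize>>>( size, assign );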
I want to insert the contents of an array of integers,
int arr[n], into a vector of QStrings, std::vector<QString> vQString. I can do it by inserting the array's elements one by one:
vQString.push_back(QString::number(arr[i]));
But I would prefer to do it with a single insert operation - any advice?
Thanks
This isn't a one-line solution, but it is an extensible one. What you basically do is create a template function that does the conversion for you in an exception-safe manner, like below:
namespace yournamespace {
template <typename U>
struct NumberToString {
    QString operator()(const U& val) const {
        return QString::number(val);
    }
};
template <typename T, typename U, typename Convertor>
void CopyArrayToVector(QVector<T>& dst, const U* src, const size_t size) {
    Convertor convert;
    QVector<T> temp;
    temp.reserve(size);
    for (size_t i = 0; i < size; ++i) {
        temp.push_back(convert(src[i]));
    }
    dst.swap(temp);
}
}
Usage:
using namespace yournamespace;
const size_t n = 10;
int* arr = new int[n];
QVector<QString> dst;
CopyArrayToVector<QString, int, NumberToString<int> >(dst, arr, n);
DISCLAIMER: I'm not familiar with Qt framework. I whipped this up by looking at their documentation. Please feel free to correct me for any errors.
const int n = 5;
int arr[n] = { 4, 6, 2, 3, 1 };
vector< QString > v( n );
transform( arr, arr + n, v.begin(),
[] ( int i ) { return QString::number( i ); } );
for ( const QString& str : v ) {
cout << qPrintable( str ) << endl;
}
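If the destination vector may already hold elements, a sketch of the same idea that appends in one call via std::back_inserter (vQString as in the question, arr and n as above):
vQString.reserve( vQString.size() + n );
transform( arr, arr + n, back_inserter( vQString ),
    [] ( int i ) { return QString::number( i ); } );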
Slightly cheating...! Just use a for loop like everyone else.
The code below executes 4x faster if, near the "REPLACING WITH .." line, the call through the functor compare_swaps() is replaced with a direct call to my_intcmp(). Apparently the indirect call is not being inlined. Why?
#include <cstdlib>  // rand, srand
#include <ctime>    // time
inline bool my_intcmp( const int & a, const int & b ) {
return a < b;
}
template <class T, class compare>
void insertion_sort_3( T* first, T* last, compare compare_swaps ) {
// Count is 1?
if( 1 == (last - first) )
return;
for( T* it = first + 1; it != last; it ++ ) {
T val = *it;
T* jt;
for( jt = it; jt != first; jt -- ) {
// REPLACING WITH if( my_intcmp(val, *(jt - 1) ) gives 4x speedup, WHY?
if( compare_swaps(val, *(jt - 1)) ) {
*jt = *(jt - 1);
} else {
break;
}
}
*jt = val;
}
}
#define SIZE 100000
#define COUNT 4
int main() {
int myarr[ SIZE ];
srand( time(NULL) );
int n = COUNT;
while( n-- ) {
for( int i = 0; i < SIZE; i ++ )
myarr[ i ] = rand() % 20000;
insertion_sort_3( myarr, myarr + SIZE, my_intcmp );
}
return 0;
}
The compiler sees a function pointer which it can't really prove doesn't change. I have seen this a couple of times before. The fix to the problem is to use a simple wrapper struct:
struct my_intcmp_wrapper
{
bool operator()(int v0, int v1) const {
return my_intcmp(v0, v1);
}
};
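The wrapper's operator() is a concrete call the compiler can see through and inline. The call site from the question then becomes:
insertion_sort_3( myarr, myarr + SIZE, my_intcmp_wrapper() );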
Specifically for built-in types, you probably want to pass the objects by value rather than by reference. For inlined functions it probably doesn't make much of a difference, but if the function isn't inlined, passing by reference generally makes the situation worse.
I need to find an element position in an std::vector to use it for referencing an element in another vector:
int find( const vector<type>& where, int searchParameter )
{
for( int i = 0; i < where.size(); i++ ) {
if( conditionMet( where[i], searchParameter ) ) {
return i;
}
}
return -1;
}
// caller:
const int position = find( firstVector, parameter );
if( position != -1 ) {
doAction( secondVector[position] );
}
However, vector::size() returns size_t, which is an unsigned integral type that can't directly store -1. How do I signal that the element is not found in a vector when using size_t instead of int as an index?
Take a look at the answers provided for this question: Invalid value for size_t?. Also you can use std::find_if with std::distance to get the index.
std::vector<type>::iterator iter = std::find_if(vec.begin(), vec.end(), comparisonFunc);
size_t index = std::distance(vec.begin(), iter);
if(index == vec.size())
{
//invalid
}
First of all, do you really need to store indices like this? Have you looked into std::map, enabling you to store key => value pairs?
Secondly, if you used iterators instead, you would be able to return the vector's end() iterator to indicate an invalid result. To convert an iterator to an index you simply use
size_t i = it - myvector.begin();
You could use std::numeric_limits<size_t>::max() for elements that were not found. It is a valid value, but it is impossible to create a container with an element at that index: even if a std::vector had a size equal to std::numeric_limits<size_t>::max(), the maximum allowed index would be std::numeric_limits<size_t>::max()-1, since elements are counted from 0.
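As a sketch, assuming find is changed to return size_t and this sentinel on failure:
const size_t not_found = std::numeric_limits<size_t>::max();
const size_t position = find( firstVector, parameter );
if( position != not_found ) {
    doAction( secondVector[position] );
}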
std::vector has random-access iterators. You can do pointer arithmetic with them. In particular, this my_vec.begin() + my_vec.size() == my_vec.end() always holds. So you could do
const vector<type>::const_iterator pos = std::find_if( firstVector.begin()
, firstVector.end()
, some_predicate(parameter) );
if( pos != firstVector.end() ) {
const vector<type>::size_type idx = pos-firstVector.begin();
doAction( secondVector[idx] );
}
As an alternative, there's always std::numeric_limits<vector<type>::size_type>::max() to be used as an invalid value.
In this case, it is safe to cast away the unsigned portion unless your vector can get REALLY big.
I would pull out the where.size() to a local variable since it won't change during the call. Something like this:
int find( const vector<type>& where, int searchParameter ){
int size = static_cast<int>(where.size());
for( int i = 0; i < size; i++ ) {
if( conditionMet( where[i], searchParameter ) ) {
return i;
}
}
return -1;
}
If a vector has N elements, there are N+1 possible answers for find. std::find and std::find_if return an iterator to the found element OR end() if no element is found. To change the code as little as possible, your find function should return the equivalent position:
size_t find( const vector<type>& where, int searchParameter )
{
for( size_t i = 0; i < where.size(); i++ ) {
if( conditionMet( where[i], searchParameter ) ) {
return i;
}
}
return where.size();
}
// caller:
const size_t position = find( firstVector, parameter );
if( position != firstVector.size() ) {
doAction( secondVector[position] );
}
I would still use std::find_if, though.
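A minimal sketch of that caller written with std::find_if and a lambda (conditionMet, parameter, type, and the two vectors are the names from the question):
auto it = std::find_if( firstVector.begin(), firstVector.end(),
    [&]( const type& v ) { return conditionMet( v, parameter ); } );
if( it != firstVector.end() ) {
    doAction( secondVector[ it - firstVector.begin() ] );
}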
Something like this, I think. find_if_counted.hpp:
#ifndef FIND_IF_COUNTED_HPP
#define FIND_IF_COUNTED_HPP
#include <algorithm>
namespace find_if_counted_impl
{
template <typename Func>
struct func_counter
{
explicit func_counter(Func& func, unsigned &count) :
_func(func),
_count(count)
{
}
template <typename T>
bool operator()(const T& t)
{
++_count;
return _func(t);
}
private:
Func& _func;
unsigned& _count;
};
}
// generic find_if_counted,
// returns the index of the found element, otherwise returns find_if_not_found
const size_t find_if_not_found = static_cast<size_t>(-1);
template <typename InputIterator, typename Func>
size_t find_if_counted(InputIterator start, InputIterator finish, Func func)
{
unsigned count = 0;
find_if_counted_impl::func_counter<Func> f(func, count);
InputIterator result = std::find_if(start, finish, f);
if (result == finish)
{
return find_if_not_found;
}
else
{
return count - 1;
}
}
#endif
Example:
#include "find_if_counted.hpp"
#include <cstdlib>
#include <iostream>
#include <vector>
typedef std::vector<int> container;
int rand_number(void)
{
return rand() % 20;
}
bool is_even(int i)
{
return i % 2 == 0;
}
int main(void)
{
container vec1(10);
container vec2(10);
std::generate(vec1.begin(), vec1.end(), rand_number);
std::generate(vec2.begin(), vec2.end(), rand_number);
size_t index = find_if_counted(vec1.begin(), vec1.end(), is_even);
if (index == find_if_not_found)
{
std::cout << "vec1 has no even numbers." << std::endl;
}
else
{
std::cout << "vec1 had an even number at index: " << index <<
" vec2's corresponding number is: " << vec2[index] << std::endl;
}
}
Though I feel like I'm doing something silly... :X Any corrections are welcome, of course.
You probably should not use your own function here.
Use find() from STL.
Example:
std::list<int> L;
L.push_back(3);
L.push_back(1);
L.push_back(7);
std::list<int>::iterator result = std::find(L.begin(), L.end(), 7);
assert(result == L.end() || *result == 7);
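The same idea applied to a vector, recovering the index with std::distance (vector iterators are random-access, so plain subtraction works too):
std::vector<int> v;
v.push_back(3);
v.push_back(1);
v.push_back(7);
std::vector<int>::iterator result = std::find(v.begin(), v.end(), 7);
if (result != v.end()) {
    size_t index = std::distance(v.begin(), result);
}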
Take a vector of integers and a key (that we search for in the vector). Traverse the vector until the key is found or the last index is reached. If the key is found, print its position; otherwise print "-1".
#include <bits/stdc++.h>
using namespace std;
int main()
{
    vector<int> str;
    int flag = 0, temp = -1, key, len, num;
    cin >> len;
    for (int i = 0; i < len; i++)
    {
        cin >> key;
        str.push_back(key);
    }
    cin >> num;
    for (int i = 0; i < len; i++)
    {
        if (str[i] == num)
        {
            flag++;
            temp = i;
            break;
        }
    }
    if (flag != 0) cout << temp << endl;
    else cout << "-1" << endl;
    str.clear();
    return 0;
}
Get rid of the notion of a vector entirely:
template< typename IT, typename VT>
int index_of(IT begin, IT end, const VT& val)
{
int index = 0;
for (; begin != end; ++begin, ++index)
{
if (*begin == val) return index;
}
return -1;
}
This will allow you more flexibility and let you use constructs like
int squid[] = {5,2,7,4,1,6,3,0};
int sponge[] = {4,2,4,2,4,6,2,6};
int squidlen = sizeof(squid)/sizeof(squid[0]);
int position = index_of(&squid[0], &squid[squidlen], 3);
if (position >= 0) { std::cout << sponge[position] << std::endl; }
You could also search any other container sequentially as well.