reading from ssh channel and writing to a buffer - c++

I have this function which, once you are connected to a system over ssh, you can call to execute a given command on that system.
std::string sshconnection::exec_ssh_command(ssh_session session, char *command) {
string receive = "";
int rc, nbytes;
char buffer[256];
ssh_channel channel = ssh_channel_new(session);
if( channel == NULL )
return NULL;
rc = ssh_channel_open_session(channel);
if( rc != SSH_OK ) {
ssh_channel_free(channel);
return NULL;
}
rc = ssh_channel_request_exec(channel, command);
if( rc != SSH_OK ) {
ssh_channel_close(channel);
ssh_channel_free(channel);
cout << "Error";
return NULL;
}
nbytes = ssh_channel_read(channel, buffer, sizeof(buffer), 0);
while (nbytes > 0)
{
if (write(1, buffer, nbytes) != (unsigned int) nbytes)
{
ssh_channel_close(channel);
ssh_channel_free(channel);
return NULL;
}
nbytes = ssh_channel_read(channel, buffer, sizeof(buffer), 0);
}
if( nbytes < 0 )
return NULL;
ssh_channel_send_eof(channel);
ssh_channel_close(channel);
ssh_channel_free(channel);
return receive;
}
This function works great. I just don't understand the part that writes from buffer to file descriptor 1. We never fill receive anywhere, yet it is the return value. If we call this function like below:
s = exec_ssh_command(my_ssh_session, "cat /proc/stat" );
then s won't have any value, but if we do this:
std::cout<<s;
it will print the value of s. And of course we can't save s to a file. Can someone explain to me how this is happening?
EDIT: function to connect to ssh:
int sshconnection::sshConnection()
{
if( my_ssh_session == NULL ) {
cout << "Error creating ssh session" << endl;
return 1;
}
ssh_options_set(my_ssh_session, SSH_OPTIONS_HOST, "yourip");
ssh_options_set(my_ssh_session, SSH_OPTIONS_USER, "username");
int rc = ssh_connect(my_ssh_session);
if( rc != SSH_OK ) {
cout << "Error with connecting" << endl;
ssh_free(my_ssh_session);
return -1;
}
rc = ssh_userauth_password(my_ssh_session, NULL, "yourpassword");
if( rc != SSH_AUTH_SUCCESS) {
cout << "Error with authorization " << ssh_get_error(my_ssh_session) << endl;
ssh_disconnect(my_ssh_session);
ssh_free(my_ssh_session);
return -1;
}
// ssh_disconnect(my_ssh_session);
// ssh_free(my_ssh_session);
}

I know this is old, but I had the same issue. I came up with the following solution.
Use std::string::append, like so: receive.append(buffer, nbytes).
std::string sshconnection::exec_ssh_command(ssh_session session, char *command) {
string receive = "";
int rc, nbytes;
char buffer[256];
ssh_channel channel = ssh_channel_new(session);
if( channel == NULL )
return NULL;
rc = ssh_channel_open_session(channel);
if( rc != SSH_OK ) {
ssh_channel_free(channel);
return NULL;
}
rc = ssh_channel_request_exec(channel, command);
if( rc != SSH_OK ) {
ssh_channel_close(channel);
ssh_channel_free(channel);
cout << "Error";
return NULL;
}
nbytes = ssh_channel_read(channel, buffer, sizeof(buffer), 0);
while (nbytes > 0)
{
receive.append(buffer, nbytes);
nbytes = ssh_channel_read(channel, buffer, sizeof(buffer), 0);
}
if( nbytes < 0 )
return NULL;
ssh_channel_send_eof(channel);
ssh_channel_close(channel);
ssh_channel_free(channel);
return receive;
}
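For example, assuming the session has already been connected with sshConnection(), you can then save the returned output to a file ("proc_stat.txt" is just a placeholder name):
std::string s = exec_ssh_command(my_ssh_session, (char *)"cat /proc/stat");
std::ofstream out("proc_stat.txt"); // needs #include <fstream>
out << s; // s now really holds the command output, so this writes it to the file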

Related

how to get output of a wrong command with libssh

I have this function which can connect to a remote system via ssh:
std::string _ErrMsg;
int _RetVal = 0;
MyException errMsg;
int port = 22;
try
{
if (my_ssh_session == NULL) {
std::cout << "Error creating ssh session" << std::endl;
throw MyException("Error in creating session");
_RetVal = -1;
return _RetVal;
}
ssh_options_set(my_ssh_session, SSH_OPTIONS_HOST, (const void*)(authentication.ip));
ssh_options_set(my_ssh_session, SSH_OPTIONS_USER, (const void*)(authentication.userName));
ssh_options_set(my_ssh_session, SSH_OPTIONS_PORT, &port);
int rc = ssh_connect(my_ssh_session);
if (rc != SSH_OK) {
std::cout << "Error with connecting" << std::endl;
_ErrMsg = ssh_get_error(my_ssh_session);
ssh_free(my_ssh_session);
_RetVal = -2;
throw MyException(_ErrMsg);
}
rc = ssh_userauth_password(my_ssh_session, NULL, (const char*)(authentication.pw));
if (rc != SSH_AUTH_SUCCESS) {
std::cout << "Authentication failed " << ssh_get_error(my_ssh_session) << std::endl;
_ErrMsg = ssh_get_error(my_ssh_session);
ssh_disconnect(my_ssh_session);
ssh_free(my_ssh_session);
_RetVal = -3;
throw MyException(_ErrMsg);
}
}
catch (MyException& e)
{
throw e;
}
return _RetVal;
and this function, which executes a command through the ssh channel:
std::string ssh::exec_ssh_command(char* command)
{
std::string receive = "";
std::string err;
int rc, nbytes;
char buffer[2000];
MyException errMsg;
try {
my_ssh_session = ssh_new();
ssh_channel channel = ssh_channel_new(my_ssh_session);
if (channel == NULL)
{
receive = "Channel allocation failed.";
throw MyException(receive);
}
rc = ssh_channel_open_session(channel);
if (rc != SSH_OK)
{
free_channel(channel);
receive = "Opening session channel failed.";
throw MyException(receive);
}
rc = ssh_channel_request_exec(channel, command);
if (rc != SSH_OK) {
receive = "Channel's request executing failed.";
free_channel(channel);
throw MyException(receive);
}
nbytes = ssh_channel_read(channel, buffer, sizeof(buffer), 0);
receive = buffer;
if (nbytes > 0)
{
receive.erase(nbytes - 1, 2000);
}
else
{
receive = "Error in command: not found or wrong syntax";
throw MyException(receive);
}
if (nbytes < 0)
{
receive = "Error in reading data from channel ";
throw MyException(receive);
}
free_channel(channel);
free_session(my_ssh_session);
}
catch (MyException& err)
{
throw err;
}
return receive;
}
I want to throw an exception when a wrong command is sent to this function. For example, if I send this command: ls /sys/class/net | se -n - 1p. If I run this command in a terminal, I get this error: se: command not found. When I run it like this: ls /sys/class/net | sed -n -s 1p | grep 'something irrelevant', it gives nothing. It gives no output either way, and nbytes will be 0. Is there any way to capture that se: command not found?
Errors are printed on the stderr channel, which is distinct from the default stdout channel.
The last argument of ssh_channel_read determines which is read: 0 for stdout, 1 for stderr.
I suggest you read from both channels by calling ssh_channel_read twice.
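A rough sketch of what that could look like, keeping two separate strings; the only difference between the loops is the last (is_stderr) argument of ssh_channel_read:
std::string out, err;
char buffer[2000];
int nbytes;

// stdout: is_stderr = 0
nbytes = ssh_channel_read(channel, buffer, sizeof(buffer), 0);
while (nbytes > 0) {
    out.append(buffer, nbytes);
    nbytes = ssh_channel_read(channel, buffer, sizeof(buffer), 0);
}

// stderr: is_stderr = 1
nbytes = ssh_channel_read(channel, buffer, sizeof(buffer), 1);
while (nbytes > 0) {
    err.append(buffer, nbytes);
    nbytes = ssh_channel_read(channel, buffer, sizeof(buffer), 1);
}

if (!err.empty())
    throw MyException(err); // e.g. "se: command not found"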

Trying to execute command on router. C++ & libssh

When I try to run a command on the server (a router) I get
[C] Jan 3 05:32:16 ndm: bin::ndmc: invalid option "-c".
Most of the code is taken from the documentation.
Here is the code:
#include <iostream>
#include <string.h>
#include <libssh/libssh.h>
using std::string;
using std::cout;
using std::endl;
using std::cin;
string exec_ssh_command(ssh_session session, char *command) {
string receive = "";
int rc, nbytes;
char buffer[256];
ssh_channel channel = ssh_channel_new(session);
if( channel == NULL )
return NULL;
rc = ssh_channel_open_session(channel);
if( rc != SSH_OK ) {
ssh_channel_free(channel);
return NULL;
}
rc = ssh_channel_request_exec(channel, command);
if( rc != SSH_OK ) {
ssh_channel_close(channel);
ssh_channel_free(channel);
cout << "Error";
return NULL;
}
nbytes = ssh_channel_read(channel, buffer, sizeof(buffer), 0);
while (nbytes > 0)
{
if (write(1, buffer, nbytes) != (unsigned int) nbytes)
{
ssh_channel_close(channel);
ssh_channel_free(channel);
return NULL;
}
nbytes = ssh_channel_read(channel, buffer, sizeof(buffer), 0);
}
if( nbytes < 0 )
return NULL;
ssh_channel_send_eof(channel);
ssh_channel_close(channel);
ssh_channel_free(channel);
return receive;
}
int main() {
ssh_session my_ssh_session = ssh_new();
if( my_ssh_session == NULL ) {
cout << "Error creating ssh session" << endl;
return 1;
}
ssh_options_set(my_ssh_session, SSH_OPTIONS_HOST, "192.168.1.1");
ssh_options_set(my_ssh_session, SSH_OPTIONS_USER, "admin");
int rc = ssh_connect(my_ssh_session);
if( rc != SSH_OK ) {
cout << "Error with connecting" << endl;
ssh_free(my_ssh_session);
return -1;
}
rc = ssh_userauth_password(my_ssh_session, NULL, "pass");
if( rc != SSH_AUTH_SUCCESS) {
cout << "Error with authorization " << ssh_get_error(my_ssh_session) << endl;
ssh_disconnect(my_ssh_session);
ssh_free(my_ssh_session);
return -1;
}
string ip_hotspot = exec_ssh_command(my_ssh_session, "show ip hotspot");
cout << ip_hotspot << endl;
ssh_disconnect(my_ssh_session);
ssh_free(my_ssh_session);
return 0;
}
My router is a Zyxel Keenetic Ultra.
uname -a: Linux localhost 4.9.112-perf-gfb7749d #1 SMP PREEMPT Thu Dec 6 16:06:30 WIB 2018 aarch64 Android
g++ -v:
clang version 7.0.1 (tags/RELEASE_701/final)
Target: aarch64--linux-android
Thread model: posix
I'm a beginner with libssh, so please keep the explanation simple. :)
Thank you in advance!
It was fixed. On version 3.3.2 of Keenetic OS it works fine.

How to set start time of video saved from RTSP stream with FFMPEG

I use FFMPEG to record video from an RTSP stream. What my code does is get the current time, create a folder in the format year/month/day/hour/minute, and save the video to that folder.
When a new minute arrives, I create a new folder based on the new minute and start recording again into the new folder.
Basically it works, but each new video's start time continues from the end of the previous video. For example:
video1: 00:00 -> 00:55
video2: 00:56 -> ...
I would like all videos to start from 00:00. Can I do that?
Here is my code:
ffmpeg.h
class CtFfmpeg {
public:
CtFfmpeg();
~CtFfmpeg();
void init();
int getInput();
int getOutputName(const char *filename);
int release();
int ret;
AVFormatContext *ifmt_ctx, *ofmt_ctx;
AVStream *in_stream, *out_stream;
AVPacket pkt;
const char *in_filename;
char *out_filename;
private:
int setOutput(const char *outfilename);
AVOutputFormat *ofmt;
};
ffmpeg.cpp
#include "ctffmpeg.h"
CtFfmpeg::CtFfmpeg() {
in_filename = new char [1024];
out_filename = new char [1024];
}
CtFfmpeg::~CtFfmpeg() {
delete [] in_filename;
delete [] out_filename;
}
void CtFfmpeg::init() {
avcodec_register_all();
av_register_all();
avformat_network_init();
pkt = { 0 };
av_init_packet(&pkt);
ofmt = NULL;
ifmt_ctx = NULL;
ofmt_ctx = NULL;
return;
}
int CtFfmpeg::release() {
av_write_trailer(ofmt_ctx);
avcodec_close(out_stream->codec);
// avcodec_close(in_stream->codec);
// avformat_close_input(&ifmt_ctx);
/* close output */
if (!(ofmt->flags & AVFMT_NOFILE))
avio_close(ofmt_ctx->pb);
avformat_free_context(ofmt_ctx);
av_free_packet(&pkt);
if (ret < 0 && ret != AVERROR_EOF) {
fprintf(stderr, "Error occurred\n");
return 1;
}
}
int CtFfmpeg::getInput() {
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
fprintf(stderr, "Could not open input file '%s'", in_filename);
release();
}
if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
fprintf(stderr, "Failed to retrieve input stream information");
release();
}
av_dump_format(ifmt_ctx, 0, in_filename, 0);
}
int CtFfmpeg::setOutput(const char *outfilename) {
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, outfilename);
if (!ofmt_ctx) {
fprintf(stderr, "Could not create output context\n");
ret = AVERROR_UNKNOWN;
release();
}
ofmt = ofmt_ctx->oformat;
for (int i = 0; i < ifmt_ctx->nb_streams; i++) {
in_stream = ifmt_ctx->streams[i];
out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
if (!out_stream) {
fprintf(stderr, "Failed allocating output stream\n");
ret = AVERROR_UNKNOWN;
release();
}
ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
if (ret < 0) {
fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
release();
}
out_stream->codec->codec_tag = 0;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
} // for
av_dump_format(ofmt_ctx, 0, outfilename, 1);
if (!(ofmt->flags & AVFMT_NOFILE)) {
ret = avio_open(&ofmt_ctx->pb, outfilename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Could not open output file '%s'", outfilename);
release();
}
}
ret = avformat_write_header(ofmt_ctx, NULL);
if (ret < 0) {
fprintf(stderr, "Error occurred when opening output file\n");
release();
}
}
int CtFfmpeg::getOutputName(const char *filename){
sprintf(out_filename,filename);
setOutput(out_filename);
}
main.cpp
#include "ctfolder.h"
#include "ctffmpeg.h"
CtFfmpeg * ff;
int main(int argc, char** argv) {
if (argc < 2) {
printf("usage: %s <RTSP link> \n", argv[0]);
return 1;
}
ff = new CtFfmpeg();
ff->in_filename = argv[1]; //RTSP input link
ff->init();
ff->getInput();
string filename;
videoRecorder obj;
int start, now;
start = obj.get_current_min();
if(obj.create_folder(0755))
cout << "Cannot create folder, maybe it already exists" << endl;
else
cout << "Create folder succesfully" << endl;
int skip = 0;
while(1){
filename = obj.update_filename();
ff->getOutputName(filename.c_str());
while((now = obj.get_current_min()) == start) {
ff->ret = av_read_frame(ff->ifmt_ctx, &(ff->pkt));
skip++;
if(skip==1)
continue;
if(skip>2)
skip=2;
if (ff->ret < 0)
continue;
ff->pkt.pts = av_rescale_q_rnd(ff->pkt.pts, ff->in_stream->time_base, ff->out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
ff->pkt.dts = av_rescale_q_rnd(ff->pkt.dts, ff->in_stream->time_base, ff->out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
ff->pkt.duration = av_rescale_q(ff->pkt.duration, ff->in_stream->time_base, ff->out_stream->time_base);
ff->pkt.pos = -1;
ff->ret = av_interleaved_write_frame(ff->ofmt_ctx, &(ff->pkt));
if (ff->ret < 0) {
fprintf(stderr, "Error muxing packet\n");
continue;
}
av_free_packet(&(ff->pkt));
}
ff->release();
cout << "New minute!" << endl;
if(obj.create_folder(0755))
cout << "Cannot create folder, something's wrong" << endl;
else
cout << "Create folder succesfully" << endl;
start = now;
}
return 0;
}
You need to shift your recording packet's pts to 0.
while(<some condition>)
{
//...
int64_t pts_offset = AV_NOPTS_VALUE ;
while((now = obj.get_current_min()) == start)
{
//...
ff.pkt.pts = ...
//...
if( pts_offset == AV_NOPTS_VALUE )
{
pts_offset = ff.pkt.pts ;
}
ff.pkt.pts -= pts_offset ;
// ...
}
}
I tried to build your code and add Alexander Chernin's suggestion to it, but I ran into a muxer error!
When you decrease the recording packet's pts, its value goes below the packet's dts. In avcodec.h, above the declaration of pts, I found this comment:
pts MUST be larger or equal to dts as presentation cannot happen before decompression.
I solved this error by decreasing the recording packet's dts by the same offset as well.
ff->pkt.pts = av_rescale_q_rnd(ff->pkt.pts, ff->in_stream->time_base, ff->out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
ff->pkt.dts = av_rescale_q_rnd(ff->pkt.dts, ff->in_stream->time_base, ff->out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
if (pts_offset == AV_NOPTS_VALUE) {
pts_offset = ff->pkt.pts;
}
ff->pkt.pts -= pts_offset;
ff->pkt.dts -= pts_offset;
ff->pkt.duration = av_rescale_q(ff->pkt.duration, ff->in_stream->time_base, ff->out_stream->time_base);
ff->pkt.pos = -1;

C++ Retrieve data via SSH using libssh library failed

I'm trying to get the output of the command 'df' (which I'll replace later) from a remote server:
#include <libssh/libssh.h>
#include <stdlib.h>
#include <stdio.h>
int main()
{
ssh_session my_ssh_session;
int rc;
ssh_channel channel;
char buffer[256];
int nbytes;
int port = 22;
my_ssh_session = ssh_new();
if (my_ssh_session == NULL)
exit(-1);
ssh_options_set(my_ssh_session, SSH_OPTIONS_HOST, "192.168.2.2");
ssh_options_set(my_ssh_session, SSH_OPTIONS_PORT, &port);
rc = ssh_connect(my_ssh_session);
if (rc != SSH_OK)
{
fprintf(stderr, "Failed %s\n",
ssh_get_error(my_ssh_session));
exit(-1);
}
channel = ssh_channel_new(my_ssh_session);
if (channel == NULL)
return SSH_ERROR;
rc = ssh_channel_open_session(channel);
if (rc != SSH_OK)
{
ssh_channel_free(channel);
return rc;
}
rc = ssh_channel_request_exec(channel, "df");
if (rc != SSH_OK)
{
ssh_channel_close(channel);
ssh_channel_free(channel);
return rc;
}
nbytes = ssh_channel_read(channel, buffer, sizeof(buffer), 0);
while (nbytes > 0)
{
if (write(1, buffer, nbytes) != nbytes)
{
ssh_channel_close(channel);
ssh_channel_free(channel);
return SSH_ERROR;
}
nbytes = ssh_channel_read(channel, buffer, sizeof(buffer), 0);
}
if (nbytes < 0)
{
ssh_channel_close(channel);
ssh_channel_free(channel);
return SSH_ERROR;
}
ssh_channel_send_eof(channel);
ssh_channel_close(channel);
ssh_channel_free(channel);
return SSH_OK;
ssh_disconnect(my_ssh_session);
ssh_free(my_ssh_session);
}
The compiler didn't show any errors, but there were no results when I ran the program.
I checked the syslog of the remote server and found the following line:
sshd[12794]: dispatch_protocol_error: type 90 seq 3
Please advise on what the problem could be.
Thank you.
It seems like you are trying to reach the remote server without the host authentication step (e.g. checking the information in ~/.ssh/known_hosts) or user authentication via public keys or a password. You should place these two steps after the
if (rc != SSH_OK)
{
fprintf(stderr, "Failed %s\n", ssh_get_error(my_ssh_session));
exit(-1);
}
Look through chapters 1 and 2 of the libssh tutorial.
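For example (a rough sketch; the exact known-hosts function depends on your libssh version, and the password is a placeholder):
// Host authentication: verify the server against ~/.ssh/known_hosts.
// Older libssh releases use ssh_is_server_known() instead.
if (ssh_session_is_known_server(my_ssh_session) != SSH_KNOWN_HOSTS_OK)
{
    fprintf(stderr, "Host verification failed\n");
    ssh_disconnect(my_ssh_session);
    ssh_free(my_ssh_session);
    exit(-1);
}

// User authentication with a password ("yourpassword" is a placeholder).
rc = ssh_userauth_password(my_ssh_session, NULL, "yourpassword");
if (rc != SSH_AUTH_SUCCESS)
{
    fprintf(stderr, "Authentication failed: %s\n", ssh_get_error(my_ssh_session));
    ssh_disconnect(my_ssh_session);
    ssh_free(my_ssh_session);
    exit(-1);
}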

C++ CreateIoCompletionPort on new socket

EDIT: I am guessing the problem is that I have to associate the OVERLAPPED or WSAOVERLAPPED in the container with my completion port. Is that correct?
I can get IO completions when someone connects to my server. I then call CreateIoCompletionPort on the new socket with the completion port that was originally used. But when they send me data, the completion never fires. It still fires if someone else connects, though. My question is, why would this happen? I also make sure CreateIoCompletionPort returns the same handle as the original. What gives?
EDIT:
DWORD WINAPI worker_thread(LPVOID lpParam) {
client_information_class *cicc = NULL;
HANDLE CompletionPort = (HANDLE)lpParam;
ULONG_PTR Key;
DWORD BytesTransfered;
OVERLAPPED *lpOverlapped = NULL;
DWORD error = NULL;
while(1) {
error = GetQueuedCompletionStatus(CompletionPort, &BytesTransfered, (PULONG_PTR)&Key, &lpOverlapped, 0);
cicc = CONTAINING_RECORD ( lpOverlapped, client_information_class, ol );
if ( error == TRUE ) {
cout << endl << "IO TRIGGERED" << endl;
switch ( cicc->operation ) {
/*#define OP_ACCEPT 0
#define OP_READ 1
#define OP_WRITE 2*/
case 0:{
if ( check_auth_progress ( cicc->client_socket , cicc->client_buff , BytesTransfered ) ) {
cout << "Client " << cicc->client_socket << " connected." << endl;
client_information_class *k = NULL;
SOCKADDR_STORAGE *LocalSockaddr=NULL, *RemoteSockaddr=NULL;
int LocalSockaddrLen,RemoteSockaddrLen;
k = (client_information_class *)Key;
k->lpfnGetAcceptExSockaddrs(
cicc->client_buff,
cicc->client_len - ((sizeof(SOCKADDR_STORAGE) + 16) * 2),
sizeof(SOCKADDR_STORAGE) + 16,
sizeof(SOCKADDR_STORAGE) + 16,
(SOCKADDR **)&cicc->LocalSockaddr,
&cicc->LocalSockaddrLen,
(SOCKADDR **)&cicc->RemoteSockaddr,
&cicc->RemoteSockaddrLen
);
client_information_class *cicc2 = NULL;
cicc2 = ( client_information_class *)HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(client_information_class) + (sizeof(BYTE) * 4096));
if (cicc2 == NULL) {
fprintf(stderr, "Out of memory!\n");
}
cicc2->client_socket = cicc->client_socket;
cicc2->client_socketaddr_in = cicc->client_socketaddr_in;
cicc2->LocalSockaddr = cicc->LocalSockaddr;
cicc2->LocalSockaddrLen = cicc->LocalSockaddrLen;
cicc2->RemoteSockaddr = cicc->RemoteSockaddr;
cicc2->RemoteSockaddrLen = cicc->RemoteSockaddrLen;
HANDLE hrc = CreateIoCompletionPort( (HANDLE)cicc2->client_socket, CompletionPort, (ULONG_PTR)cic, 0 );
if (hrc == NULL) {
fprintf(stderr, "CompletionThread: CreateIoCompletionPort failed: %d\n", GetLastError());
return 0;
} else {
fprintf(stderr, "CompletionThread: CreateIoCompletionPort: %d\n", hrc);
}
cic->deleteNode ( cicc->client_socket , cic );
cic->addNode ( cicc2 );
} else {
cout << endl << "Something Happened ... " << endl;
}
}break;
case 1:{
if ( ParsePacket ( cicc->client_socket , data ) ) {
cout << "Client " << cicc->client_socket << " connected." << endl;
} else {
cout << endl << "Something Happened ... " << endl;
}
}break;
default:{
cout << endl << "Didnt catch that operation ... " << cicc->operation << endl;
}break;
}
} else if ( error == FALSE && &lpOverlapped == NULL ) {
// no packet was dequed...
fprintf(stderr, "[error == FALSE && &lpOverlapped == NULL] CompletionThread: GetQueuedCompletionStatus failed: %d [0x%x]\n", GetLastError(), &lpOverlapped->Internal);
} else if ( error == FALSE && &lpOverlapped != NULL ) {
if((DWORD)&lpOverlapped->Internal == 0x0) { // a timeout...
} else {
fprintf(stderr, "[error == FALSE && &lpOverlapped != NULL] CompletionThread: GetQueuedCompletionStatus failed: %d [0x%x]\n", GetLastError(), &lpOverlapped->Internal);
}
}
}
ExitThread(0);
return 0;
}
I hate to do this again, but I was correct: you have to post a receive on the new socket (much like with AcceptEx) using WSARecv. I did not know this, it's not very clear on MSDN, and one of the sources I was using to learn IOCP doesn't talk about it. Hopefully this helps someone :/
WSABUF wbuf;
DWORD bytes, flags;
wbuf.buf = cicc2->client_buff;
wbuf.len = cicc2->client_len;
flags = 0;
int rr = WSARecv ( cicc2->client_socket , &wbuf , 1 , &bytes , &flags , &cicc2->ol , NULL );
if (rr == SOCKET_ERROR) { // WSARecv returns 0 on immediate success, SOCKET_ERROR otherwise
if (WSAGetLastError() != WSA_IO_PENDING) {
printf("PostRecv: WSARecv* failed: %d\n", WSAGetLastError());
closesocket(cicc2->client_socket);
cic->deleteNode ( cicc2->client_socket , cic );
}
fprintf(stderr, "PostRecv: WSARecv* failed: %d\n", GetLastError());
}
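One more note: an overlapped WSARecv only delivers a single completion, so in the OP_READ case of the worker thread you have to post another WSARecv before any further data from that client will be queued. A rough sketch, reusing the fields from the structure above:
// After handling the data in the case 1 (OP_READ) branch:
WSABUF wbuf;
DWORD bytes, flags = 0;
wbuf.buf = cicc->client_buff;
wbuf.len = cicc->client_len;
if (WSARecv(cicc->client_socket, &wbuf, 1, &bytes, &flags, &cicc->ol, NULL) == SOCKET_ERROR
    && WSAGetLastError() != WSA_IO_PENDING)
{
    closesocket(cicc->client_socket); // real failure, drop the connection
}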