Implemented link padding and receiver token buckets.

Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.

Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set LinkPadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).

Added license/copyright statements at the top of most files.

router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)

As the bandwidth increases, we have to wake up more and more often to
send a single cell, so CPU usage goes up. At 128kB/s we're pretty much
calling poll() with a timeout of 1ms or even 0ms. The current code
rounds any timeout of 0-9ms up to 10ms. prepare_for_poll() handles
everything that should have happened in the past, so as long as our
buffers don't get too full in that 10ms, we're OK.

Speaking of too full: if you run three servers at 100kB/s with -l debug,
they spend too much time printing debug messages to keep up with the
cells. The outbuf eventually fills up, which kills that connection. With
-l err it works fine up through 500kB/s and probably beyond. Down the
road we'll want to teach it to recognize when an outbuf is getting full
and back off.

svn:r50
2002-07-16 03:12:15 +02:00
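For illustration, here is a minimal sketch of the receiver-token-bucket
bookkeeping described above: the bucket refills at 'bandwidth' bytes per
second, is capped at 10*bandwidth to bound bursts, and limits how much a
caller should ask read_to_buf() to pull in one go. The struct and helper
names (conn_bucket, bucket_refill, bucket_allowance) are hypothetical;
the real state lives on the connection objects, not in this file.

/* Illustrative sketch only: per-connection receiver token bucket.
 * Field and function names are hypothetical, not the actual ones. */
struct conn_bucket {
  int bandwidth;        /* sustained rate, bytes per second */
  int receiver_bucket;  /* tokens (bytes) currently available to read */
};

/* called once per second: refill, but never above the burst ceiling */
static void bucket_refill(struct conn_bucket *c) {
  c->receiver_bucket += c->bandwidth;
  if (c->receiver_bucket > 10*c->bandwidth) /* bursts capped at 10*bandwidth */
    c->receiver_bucket = 10*c->bandwidth;
}

/* before reading: don't ask read_to_buf() for more than we have tokens for.
 * After a successful read, the caller would subtract the bytes actually
 * read from receiver_bucket. */
static int bucket_allowance(struct conn_bucket *c, int wanted) {
  return wanted < c->receiver_bucket ? wanted : c->receiver_bucket;
}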
/* Copyright 2001,2002 Roger Dingledine, Matej Pfajfar. */
/* See LICENSE for licensing information */
/* $Id$ */

/* buffers.c */

#include "or.h"

extern or_options_t options; /* command-line and config-file options */

/* Create a new buf of size MAX_BUF_SIZE. Write a pointer to it
 * into *buf, write MAX_BUF_SIZE into *buflen, and initialize
 * *buf_datalen to 0. Return 0 if success, or -1 if malloc fails.
 */
int buf_new(char **buf, int *buflen, int *buf_datalen) {

  assert(buf && buflen && buf_datalen);

  *buf = (char *)malloc(MAX_BUF_SIZE);
  if(!*buf)
    return -1;
//  memset(*buf,0,MAX_BUF_SIZE);
  *buflen = MAX_BUF_SIZE;
  *buf_datalen = 0;

  return 0;
}
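
/* Free a buffer allocated by buf_new(). */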
void buf_free(char *buf) {
  free(buf);
}

/* read from socket s, writing onto buf+buf_datalen. If at_most is >= 0 then
 * read at most 'at_most' bytes, and in any case don't read more than will fit
 * based on buflen. If read() returns 0, set *reached_eof to 1 and return 0.
 * If you want to tear down the connection return -1, else return the number
 * of bytes read.
 */
int read_to_buf(int s, int at_most, char **buf, int *buflen, int *buf_datalen, int *reached_eof) {
  int read_result;

  assert(buf && *buf && buflen && buf_datalen && reached_eof && (s>=0));

  /* this is the point where you would grow the buffer, if you want to */

  if(at_most < 0 || *buflen - *buf_datalen < at_most)
    at_most = *buflen - *buf_datalen; /* take the min of the two */
    /* (note that this only modifies at_most inside this function) */

  if(at_most == 0)
    return 0; /* we shouldn't read anything */

  if(!options.LinkPadding && at_most > 10*sizeof(cell_t)) {
    /* if no linkpadding: do a rudimentary round-robin so one
     * connection can't hog a thickpipe
     */
    at_most = 10*(CELL_PAYLOAD_SIZE - TOPIC_HEADER_SIZE);
    /* XXX this still isn't perfect. now we read 10 data payloads per read --
     * but if we're reading from a connection that speaks cells, we always
     * read a partial cell from the network and can't process it yet. Good
     * enough for now though. (And maybe best, to stress our code more.)
     */
  }

//  log(LOG_DEBUG,"read_to_buf(): reading at most %d bytes.",at_most);
  read_result = read(s, *buf+*buf_datalen, at_most);
  if (read_result < 0) {
    if(errno!=EAGAIN) { /* it's a real error */
      return -1;
    }
    return 0;
  } else if (read_result == 0) {
    log(LOG_DEBUG,"read_to_buf(): Encountered eof");
    *reached_eof = 1;
    return 0;
  } else { /* we read some bytes */
    *buf_datalen += read_result;
//    log(LOG_DEBUG,"read_to_buf(): Read %d bytes. %d on inbuf.",read_result, *buf_datalen);
    return read_result;
  }
}
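
/* Illustrative usage sketch: how these helpers are typically chained,
 * assuming a connected non-blocking socket 'sock' and a cell-sized scratch
 * area; the names 'sock' and 'cellbuf' are hypothetical.
 *
 *   char *inbuf; int inbuflen, inbuf_datalen, eof = 0;
 *   char cellbuf[sizeof(cell_t)];
 *
 *   if(buf_new(&inbuf, &inbuflen, &inbuf_datalen) < 0)
 *     return -1;
 *   if(read_to_buf(sock, -1, &inbuf, &inbuflen, &inbuf_datalen, &eof) < 0)
 *     return -1;   (tear down the connection)
 *   while(inbuf_datalen >= sizeof(cell_t))
 *     fetch_from_buf(cellbuf, sizeof(cell_t), &inbuf, &inbuflen, &inbuf_datalen);
 *   buf_free(inbuf);
 */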

int flush_buf(int s, char **buf, int *buflen, int *buf_flushlen, int *buf_datalen) {

  /* push from buf onto s
   * then memmove to front of buf
   * return -1 or how many bytes remain to be flushed */

  int write_result;

  assert(buf && *buf && buflen && buf_flushlen && buf_datalen && (s>=0) && (*buf_flushlen <= *buf_datalen));

  if(*buf_flushlen == 0) /* nothing to flush */
    return 0;

  /* this is the point where you would grow the buffer, if you want to */

  write_result = write(s, *buf, *buf_flushlen);
  if (write_result < 0) {
    if(errno!=EAGAIN) { /* it's a real error */
      return -1;
    }
    log(LOG_DEBUG,"flush_buf(): write() would block, returning.");
    return 0;
  } else {
    *buf_datalen -= write_result;
    *buf_flushlen -= write_result;
    memmove(*buf, *buf+write_result, *buf_datalen);
//    log(LOG_DEBUG,"flush_buf(): flushed %d bytes, %d ready to flush, %d remain.",
//      write_result,*buf_flushlen,*buf_datalen);
    return *buf_flushlen;
  }
}

int write_to_buf(char *string, int string_len,
                 char **buf, int *buflen, int *buf_datalen) {

  /* append string to buf (growing as needed, return -1 if "too big")
   * return total number of bytes on the buf
   */

  assert(string && buf && *buf && buflen && buf_datalen);

  /* this is the point where you would grow the buffer, if you want to */

  if (string_len + *buf_datalen > *buflen) { /* we're out of luck */
    log(LOG_DEBUG, "write_to_buf(): buflen too small. Time to implement growing dynamic bufs.");
    return -1;
  }

  memcpy(*buf+*buf_datalen, string, string_len);
  *buf_datalen += string_len;
//  log(LOG_DEBUG,"write_to_buf(): added %d bytes to buf (now %d total).",string_len, *buf_datalen);
  return *buf_datalen;
}

#ifdef USE_ZLIB
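/* Compress up to *buf_datalen_in bytes from *buf_in into 'string' (at most
 * string_len bytes), using 'zstream' with the given zlib flush mode. Remove
 * the consumed bytes from buf_in. Return the number of compressed bytes
 * written into 'string', or -1 on error.
 */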
int compress_from_buf(char *string, int string_len,
                      char **buf_in, int *buflen_in, int *buf_datalen_in,
                      z_stream *zstream, int flush) {
  int err;

  if (!*buf_datalen_in)
    return 0;

  zstream->next_in = *buf_in;
  zstream->avail_in = *buf_datalen_in;
  zstream->next_out = string;
  zstream->avail_out = string_len;

  err = deflate(zstream, flush);

  switch (err)
    {
    case Z_OK:
    case Z_STREAM_END:
      memmove(*buf_in, zstream->next_in, zstream->avail_in);
      *buf_datalen_in = zstream->avail_in;
      return string_len - zstream->avail_out;
    case Z_STREAM_ERROR:
    case Z_BUF_ERROR:
      log(LOG_ERR, "Error processing compression: %s", zstream->msg);
      return -1;
    default:
      log(LOG_ERR, "Unknown return value from deflate: %d", err);
      return -1;
    }
}

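/* Decompress bytes from *buf_in, appending the output to *buf_out at
 * *buf_datalen_out, using 'zstream' with the given zlib flush mode. Remove
 * the consumed bytes from buf_in and update both datalen counters. Return 1
 * on success or on a recoverable zlib error, 0 if there was nothing to do
 * (no input and no output space), or -1 on an unexpected return value.
 */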
int decompress_buf_to_buf(char **buf_in, int *buflen_in, int *buf_datalen_in,
                          char **buf_out, int *buflen_out, int *buf_datalen_out,
                          z_stream *zstream, int flush)
{
  int err;

  zstream->next_in = *buf_in;
  zstream->avail_in = *buf_datalen_in;
  zstream->next_out = *buf_out + *buf_datalen_out;
  zstream->avail_out = *buflen_out - *buf_datalen_out;

  if (!zstream->avail_in && !zstream->avail_out)
    return 0;

  err = inflate(zstream, flush);

  switch (err)
    {
    case Z_OK:
    case Z_STREAM_END:
      memmove(*buf_in, zstream->next_in, zstream->avail_in);
      *buf_datalen_in = zstream->avail_in;
      *buf_datalen_out = *buflen_out - zstream->avail_out;
      return 1;
    case Z_STREAM_ERROR:
    case Z_BUF_ERROR:
      log(LOG_ERR, "Error processing compression: %s", zstream->msg);
      return 1;
    default:
      log(LOG_ERR, "Unknown return value from inflate: %d", err);
      return -1;
    }
}
#endif

int fetch_from_buf(char *string, int string_len,
                   char **buf, int *buflen, int *buf_datalen) {

  /* if there are string_len bytes in buf, write them onto string,
   * then memmove buf back (that is, remove them from buf) */

  assert(string && buf && *buf && buflen && buf_datalen);

  /* this is the point where you would grow the buffer, if you want to */

  if(string_len > *buf_datalen) /* we want too much. sorry. */
    return -1;

  memcpy(string,*buf,string_len);
  *buf_datalen -= string_len;
  memmove(*buf, *buf+string_len, *buf_datalen);
  return *buf_datalen;
}

int find_on_inbuf(char *string, int string_len,
                  char *buf, int buf_datalen) {
  /* find first instance of needle 'string' on haystack 'buf'. return how
   * many bytes from the beginning of buf to the end of string.
   * If it's not there, return -1.
   */

  char *location;
  char *last_possible = buf + buf_datalen - string_len;

  assert(string && string_len > 0 && buf);

  if(buf_datalen < string_len)
    return -1;

  for(location = buf; location <= last_possible; location++)
    if((*location == *string) && !memcmp(location+1, string+1, string_len-1))
      return location-buf+string_len;

  return -1;
}