Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
/* Copyright 2001,2002 Roger Dingledine, Matej Pfajfar. */
|
|
|
|
/* See LICENSE for licensing information */
|
|
|
|
/* $Id$ */
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
/* buffers.c */
|
|
|
|
|
|
|
|
#include "or.h"
|
|
|
|
|
2002-08-23 08:49:43 +02:00
|
|
|
extern or_options_t options; /* command-line and config-file options */
|
|
|
|
|
2003-09-25 07:17:11 +02:00
|
|
|
/* A byte buffer: a fixed-size chunk of memory plus a fill count. */
struct buf_t {
  char *buf;      /* Memory holding the buffered bytes. */
  size_t len;     /* Allocated size of 'buf'. */
  size_t datalen; /* Number of bytes currently stored; always <= len. */
};
|
|
|
|
|
|
|
|
/* Sanity check: b is non-NULL, has storage, and its fill level does not
 * exceed its capacity. */
#define BUF_OK(b) ((b) && (b)->buf && (b)->datalen <= (b)->len)
|
|
|
|
|
|
|
|
/* Find the first instance of str on buf. If none exists, return -1.
 * Otherwise, return index of the first character in buf _after_ the
 * first instance of str.
 */
static int find_str_in_str(const char *str, int str_len,
                           const char *buf, int buf_len)
{
  const char *location;
  const char *last_possible;

  assert(str && str_len > 0 && buf);

  if(buf_len < str_len)
    return -1;

  /* Compute this only after we know buf_len >= str_len: forming
   * buf + buf_len - str_len when buf_len < str_len would be
   * out-of-bounds pointer arithmetic (undefined behavior), even if
   * the pointer were never dereferenced. */
  last_possible = buf + buf_len - str_len;

  for(location = buf; location <= last_possible; location++)
    /* Compare the first byte cheaply before falling back to memcmp
     * for the remainder of the pattern. */
    if((*location == *str) && !memcmp(location+1, str+1, str_len-1))
      return location-buf+str_len;

  return -1;
}
|
|
|
|
|
2003-09-25 12:42:07 +02:00
|
|
|
/* Search buf's pending data for the first occurrence of 'string'
 * (string_len bytes).  Returns -1 if absent; otherwise the index just
 * past the end of the match (see find_str_in_str). */
int find_on_inbuf(char *string, int string_len, buf_t *buf) {
  return find_str_in_str(string, string_len, buf->buf, buf->datalen);
}
|
|
|
|
|
2003-09-25 07:17:11 +02:00
|
|
|
/* Create and return a new buf of size 'size'
|
2003-03-04 05:36:37 +01:00
|
|
|
*/
|
2003-09-27 09:33:07 +02:00
|
|
|
buf_t *buf_new_with_capacity(size_t size) {
|
2003-09-25 07:17:11 +02:00
|
|
|
buf_t *buf;
|
|
|
|
buf = (buf_t*)tor_malloc(sizeof(buf_t));
|
|
|
|
buf->buf = (char *)tor_malloc(size);
|
|
|
|
buf->len = size;
|
|
|
|
buf->datalen = 0;
|
|
|
|
// memset(buf->buf,0,size);
|
|
|
|
|
|
|
|
assert(BUF_OK(buf));
|
|
|
|
return buf;
|
|
|
|
}
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2003-09-25 07:17:11 +02:00
|
|
|
buf_t *buf_new()
|
|
|
|
{
|
|
|
|
return buf_new_with_capacity(MAX_BUF_SIZE);
|
|
|
|
}
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
|
2003-09-25 07:17:11 +02:00
|
|
|
/* Return the number of bytes currently stored on 'buf'. */
size_t buf_datalen(const buf_t *buf)
{
  return buf->datalen;
}
|
|
|
|
|
2003-09-25 07:17:11 +02:00
|
|
|
/* Return the total number of bytes 'buf' can hold. */
size_t buf_capacity(const buf_t *buf)
{
  return buf->len;
}
|
|
|
|
|
|
|
|
/* Return a read-only pointer to buf's underlying storage.
 * NOTE(review): exposes internal memory directly; presumably intended
 * for testing/debugging callers only -- confirm against call sites. */
const char *_buf_peek_raw_buffer(const buf_t *buf)
{
  return buf->buf;
}
|
|
|
|
|
|
|
|
/* Release 'buf' and all storage it holds.  buf must be a valid,
 * non-NULL buffer (freeing NULL is not supported here). */
void buf_free(buf_t *buf) {
  assert(buf && buf->buf);
  free(buf->buf);
  free(buf);
}
|
|
|
|
|
2003-09-25 07:17:11 +02:00
|
|
|
|
|
|
|
|
|
|
|
/* read from socket s, writing onto end of buf.
 * read at most 'at_most' bytes, and in any case don't read more than will fit based on buflen.
 * If read() returns 0, set *reached_eof to 1 and return 0. If you want to tear
 * down the connection return -1, else return the number of bytes read.
 */
int read_to_buf(int s, int at_most, buf_t *buf, int *reached_eof) {

  int read_result;
#ifdef MS_WINDOWS
  int e; /* winsock errors don't always appear in errno; see below */
#endif

  assert(BUF_OK(buf) && reached_eof && (s>=0));

  /* this is the point where you would grow the buffer, if you want to */

  /* NOTE(review): at_most (int) is compared against a size_t
   * difference, so the comparison happens in unsigned arithmetic; a
   * negative at_most would not be clamped here.  Verify callers never
   * pass a negative value. */
  if(at_most > buf->len - buf->datalen)
    at_most = buf->len - buf->datalen; /* take the min of the two */

  if(at_most == 0)
    return 0; /* we shouldn't read anything */

//  log_fn(LOG_DEBUG,"reading at most %d bytes.",at_most);
  read_result = read(s, buf->buf+buf->datalen, at_most);
  if (read_result < 0) {
    if(!ERRNO_EAGAIN(errno)) { /* it's a real error */
      return -1;
    }
#ifdef MS_WINDOWS
    /* errno claimed "would block", but on Windows the authoritative
     * error code must be fetched from the socket itself. */
    e = correct_socket_errno(s);
    if(!ERRNO_EAGAIN(e)) { /* no, it *is* a real error! */
      return -1;
    }
#endif
    return 0; /* would block: nothing read, but not a failure */
  } else if (read_result == 0) {
    /* read() returning 0 on a socket means the peer closed cleanly. */
    log_fn(LOG_DEBUG,"Encountered eof");
    *reached_eof = 1;
    return 0;
  } else { /* we read some bytes */
    buf->datalen += read_result;
    log_fn(LOG_DEBUG,"Read %d bytes. %d on inbuf.",read_result,
           (int)buf->datalen);
    return read_result;
  }
}
|
|
|
|
|
2003-09-25 07:17:11 +02:00
|
|
|
/* Pull up to 'at_most' bytes from the TLS connection onto the end of
 * buf, never reading more than the buffer has room for.  Returns the
 * (nonnegative) count of bytes read, or a negative tor_tls error code. */
int read_to_buf_tls(tor_tls *tls, int at_most, buf_t *buf) {
  int result;

  assert(tls && BUF_OK(buf));

  /* Clamp the request to the free space remaining on the buffer. */
  if (at_most > buf->len - buf->datalen)
    at_most = buf->len - buf->datalen;

  if (at_most == 0)
    return 0; /* buffer is full; nothing to do */

  result = tor_tls_read(tls, buf->buf+buf->datalen, at_most);
  if (r<0)
    return result; /* propagate the tor_tls error code unchanged */

  buf->datalen += result;
  log_fn(LOG_DEBUG,"Read %d bytes. %d on inbuf.",result, (int)buf->datalen);
  return result;
}
|
|
|
|
|
2003-09-25 07:17:11 +02:00
|
|
|
/* Write up to *buf_flushlen bytes from buf onto socket s, then compact
 * the flushed bytes off the front of buf.
 * Returns -1 on real error, otherwise the number of bytes still marked
 * ready to flush (i.e. the updated *buf_flushlen). */
int flush_buf(int s, buf_t *buf, int *buf_flushlen)
{

  /* push from buf onto s
   * then memmove to front of buf
   * return -1 or how many bytes remain to be flushed */

  int write_result;
#ifdef MS_WINDOWS
  int e; /* winsock errors don't always appear in errno; see below */
#endif

  assert(BUF_OK(buf) && buf_flushlen && (s>=0) && (*buf_flushlen <= buf->datalen));

  if(*buf_flushlen == 0) /* nothing to flush */
    return 0;

  write_result = write(s, buf->buf, *buf_flushlen);
  if (write_result < 0) {
    if(!ERRNO_EAGAIN(errno)) { /* it's a real error */
      return -1;
    }
#ifdef MS_WINDOWS
    /* errno claimed "would block", but on Windows the authoritative
     * error code must be fetched from the socket itself. */
    e = correct_socket_errno(s);
    if(!ERRNO_EAGAIN(e)) { /* no, it *is* a real error! */
      return -1;
    }
#endif
    log_fn(LOG_DEBUG,"write() would block, returning.");
    return 0;
  } else {
    /* Drop the flushed bytes and slide the remainder to the front. */
    buf->datalen -= write_result;
    *buf_flushlen -= write_result;
    memmove(buf->buf, buf->buf+write_result, buf->datalen);
    log_fn(LOG_DEBUG,"%d: flushed %d bytes, %d ready to flush, %d remain.",
           s,write_result,*buf_flushlen,(int)buf->datalen);
    return *buf_flushlen;
    /* XXX USE_TLS should change to return write_result like any sane function would */
  }
}
|
|
|
|
|
2003-09-25 07:17:11 +02:00
|
|
|
/* Push pending bytes from buf out over the TLS connection, then compact
 * the written bytes off the front of buf.  Unlike flush_buf(), we call
 * into TLS even when *buf_flushlen is zero.  Returns the number of
 * bytes written, or a negative tor_tls error code. */
int flush_buf_tls(tor_tls *tls, buf_t *buf, int *buf_flushlen)
{
  int written;

  assert(tls && BUF_OK(buf) && buf_flushlen);

  /* we want to let tls write even if flushlen is zero, because it might
   * have a partial record pending */
  written = tor_tls_write(tls, buf->buf, *buf_flushlen);
  if (written < 0)
    return written;

  /* Account for the flushed bytes and slide the rest forward. */
  buf->datalen -= written;
  *buf_flushlen -= written;
  memmove(buf->buf, buf->buf+written, buf->datalen);

  log_fn(LOG_DEBUG,"flushed %d bytes, %d ready to flush, %d remain.",
         written,*buf_flushlen,(int)buf->datalen);
  return written;
}
|
|
|
|
|
2003-09-25 12:42:07 +02:00
|
|
|
/* Append 'string' (string_len bytes) onto the end of buf.
 * Returns the new total number of bytes on buf, or -1 if there is
 * not enough room. */
int write_to_buf(const char *string, int string_len, buf_t *buf) {

  assert(string && BUF_OK(buf));

  /* this is the point where you would grow the buffer, if you want to */

  if (buf->datalen + string_len > buf->len) {
    /* The new data will not fit in the space we have left. */
    log_fn(LOG_WARNING, "buflen too small. Time to implement growing dynamic bufs.");
    return -1;
  }

  memcpy(buf->buf + buf->datalen, string, string_len);
  buf->datalen += string_len;
  log_fn(LOG_DEBUG,"added %d bytes to buf (now %d total).",string_len, (int)buf->datalen);
  return buf->datalen;
}
|
|
|
|
|
2003-09-25 07:17:11 +02:00
|
|
|
/* Remove the first string_len bytes from buf, copying them into
 * 'string'.  The caller must guarantee that at least string_len bytes
 * are pending on buf.
 * Returns the number of bytes still on the buffer. */
int fetch_from_buf(char *string, int string_len, buf_t *buf) {

  assert(string && BUF_OK(buf));
  assert(string_len <= buf->datalen); /* caller must not over-ask */

  memcpy(string, buf->buf, string_len);
  buf->datalen -= string_len;
  /* Slide the remaining bytes to the front (regions may overlap). */
  memmove(buf->buf, buf->buf + string_len, buf->datalen);
  return buf->datalen;
}
|
|
|
|
|
2003-09-17 22:09:06 +02:00
|
|
|
/* There is a (possibly incomplete) http statement on *buf, of the
 * form "%s\r\n\r\n%s", headers, body.
 * If a) the headers include a Content-Length field and all bytes in
 * the body are present, or b) there's no Content-Length field and
 * all headers are present, then:
 * copy headers and body into the supplied args (and null terminate
 * them), remove them from buf, and return 1.
 * (If headers or body is NULL, discard that part of the buf.)
 * If a headers or body doesn't fit in the arg, return -1.
 *
 * Else, change nothing and return 0.
 */
int fetch_from_buf_http(buf_t *buf,
                        char *headers_out, int max_headerlen,
                        char *body_out, int max_bodylen) {
  char *headers, *body;
  int i;
  int headerlen, bodylen, contentlen;

  assert(BUF_OK(buf));

  headers = buf->buf;
  /* find_on_inbuf returns the index just past the match, so 'i' is the
   * offset of the first body byte. */
  i = find_on_inbuf("\r\n\r\n", 4, buf);
  if(i < 0) {
    log_fn(LOG_DEBUG,"headers not all here yet.");
    return 0;
  }
  body = buf->buf+i;
  headerlen = body-headers; /* includes the CRLFCRLF */
  bodylen = buf->datalen - headerlen;
  log_fn(LOG_DEBUG,"headerlen %d, bodylen %d.",headerlen,bodylen);

  /* Reject early if either part cannot fit in its output buffer,
   * leaving room for the terminating NUL. */
  if(headers_out && max_headerlen <= headerlen) {
    log_fn(LOG_WARNING,"headerlen %d larger than %d. Failing.", headerlen, max_headerlen-1);
    return -1;
  }
  if(body_out && max_bodylen <= bodylen) {
    log_fn(LOG_WARNING,"bodylen %d larger than %d. Failing.", bodylen, max_bodylen-1);
    return -1;
  }

#define CONTENT_LENGTH "\r\nContent-Length: "
  /* Look for a Content-Length header; 'i' becomes the offset of its
   * value within the headers. */
  i = find_str_in_str(CONTENT_LENGTH, strlen(CONTENT_LENGTH),
                      headers, headerlen);
  if(i > 0) {
    contentlen = atoi(headers+i);
    /* XXX What if content-length is malformed? */
    log_fn(LOG_DEBUG,"Got a contentlen of %d.",contentlen);
    if(bodylen < contentlen) {
      log_fn(LOG_DEBUG,"body not all here yet.");
      return 0; /* not all there yet */
    }
    /* Consume only contentlen body bytes; anything beyond stays on buf. */
    bodylen = contentlen;
    log_fn(LOG_DEBUG,"bodylen reduced to %d.",bodylen);
  }
  /* all happy. copy into the appropriate places, and return 1 */
  if(headers_out) {
    memcpy(headers_out,buf->buf,headerlen);
    headers_out[headerlen] = 0; /* null terminate it */
  }
  if(body_out) {
    memcpy(body_out,buf->buf+headerlen,bodylen);
    body_out[bodylen] = 0; /* null terminate it */
  }
  /* Remove the consumed statement from the front of buf. */
  buf->datalen -= (headerlen+bodylen);
  memmove(buf->buf, buf->buf+headerlen+bodylen, buf->datalen);

  return 1;
}
|
|
|
|
|
2003-09-18 10:11:31 +02:00
|
|
|
/* There is a (possibly incomplete) socks handshake on *buf, of the
|
|
|
|
* forms
|
|
|
|
* socks4: "socksheader || username\0".
|
|
|
|
* socks4a: "socksheader || username\0 || destaddr\0".
|
|
|
|
* If it's a complete and valid handshake, and destaddr fits in addr_out,
|
|
|
|
* then pull the handshake off the buf, assign to addr_out and port_out,
|
|
|
|
* and return 1.
|
|
|
|
* If it's invalid or too big, return -1.
|
2003-09-21 08:15:43 +02:00
|
|
|
* Else it's not all there yet, change nothing and return 0.
|
2003-09-18 10:11:31 +02:00
|
|
|
*/
|
2003-09-25 07:17:11 +02:00
|
|
|
int fetch_from_buf_socks(buf_t *buf,
|
2003-09-18 10:11:31 +02:00
|
|
|
char *addr_out, int max_addrlen,
|
|
|
|
uint16_t *port_out) {
|
2003-09-21 08:15:43 +02:00
|
|
|
socks4_t socks4_info;
|
|
|
|
char *tmpbuf=NULL;
|
2003-09-18 10:11:31 +02:00
|
|
|
uint16_t port;
|
|
|
|
enum {socks4, socks4a } socks_prot = socks4a;
|
|
|
|
char *next, *startaddr;
|
|
|
|
|
2003-09-25 07:17:11 +02:00
|
|
|
if(buf->datalen < sizeof(socks4_t)) /* basic info available? */
|
2003-09-18 10:11:31 +02:00
|
|
|
return 0; /* not yet */
|
|
|
|
|
2003-09-21 08:15:43 +02:00
|
|
|
/* an inlined socks4_unpack() */
|
2003-09-25 07:17:11 +02:00
|
|
|
socks4_info.version = (unsigned char) *(buf->buf);
|
|
|
|
socks4_info.command = (unsigned char) *(buf->buf+1);
|
|
|
|
socks4_info.destport = ntohs(*(uint16_t*)(buf->buf+2));
|
|
|
|
socks4_info.destip = ntohl(*(uint32_t*)(buf->buf+4));
|
2003-09-18 10:11:31 +02:00
|
|
|
|
2003-09-21 08:15:43 +02:00
|
|
|
if(socks4_info.version != 4) {
|
2003-09-26 12:03:50 +02:00
|
|
|
log_fn(LOG_WARNING,"Unrecognized version %d.",socks4_info.version);
|
2003-09-18 10:11:31 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2003-09-21 08:15:43 +02:00
|
|
|
if(socks4_info.command != 1) { /* not a connect? we don't support it. */
|
2003-09-26 12:03:50 +02:00
|
|
|
log_fn(LOG_WARNING,"command %d not '1'.",socks4_info.command);
|
2003-09-18 10:11:31 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2003-09-21 08:44:53 +02:00
|
|
|
port = socks4_info.destport;
|
2003-09-18 10:11:31 +02:00
|
|
|
if(!port) {
|
2003-09-26 12:03:50 +02:00
|
|
|
log_fn(LOG_WARNING,"Port is zero.");
|
2003-09-18 10:11:31 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2003-09-21 08:15:43 +02:00
|
|
|
if(!socks4_info.destip) {
|
2003-09-26 12:03:50 +02:00
|
|
|
log_fn(LOG_WARNING,"DestIP is zero.");
|
2003-09-21 08:15:43 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if(socks4_info.destip >> 8) {
|
|
|
|
struct in_addr in;
|
2003-09-26 12:03:50 +02:00
|
|
|
log_fn(LOG_DEBUG,"destip not in form 0.0.0.x.");
|
2003-09-21 08:15:43 +02:00
|
|
|
in.s_addr = htonl(socks4_info.destip);
|
|
|
|
tmpbuf = inet_ntoa(in);
|
2003-09-18 10:11:31 +02:00
|
|
|
if(max_addrlen <= strlen(tmpbuf)) {
|
2003-09-26 12:03:50 +02:00
|
|
|
log_fn(LOG_WARNING,"socks4 addr too long.");
|
2003-09-18 10:11:31 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
log_fn(LOG_DEBUG,"Successfully read destip (%s)", tmpbuf);
|
|
|
|
socks_prot = socks4;
|
|
|
|
}
|
|
|
|
|
2003-09-25 07:17:11 +02:00
|
|
|
next = memchr(buf->buf+SOCKS4_NETWORK_LEN, 0, buf->datalen);
|
2003-09-18 10:11:31 +02:00
|
|
|
if(!next) {
|
|
|
|
log_fn(LOG_DEBUG,"Username not here yet.");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
startaddr = next+1;
|
|
|
|
if(socks_prot == socks4a) {
|
2003-09-25 07:17:11 +02:00
|
|
|
next = memchr(startaddr, 0, buf->buf+buf->datalen-startaddr);
|
2003-09-18 10:11:31 +02:00
|
|
|
if(!next) {
|
|
|
|
log_fn(LOG_DEBUG,"Destaddr not here yet.");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
if(max_addrlen <= next-startaddr) {
|
2003-09-26 12:03:50 +02:00
|
|
|
log_fn(LOG_WARNING,"Destaddr too long.");
|
2003-09-18 10:11:31 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
log_fn(LOG_DEBUG,"Everything is here. Success.");
|
|
|
|
*port_out = port;
|
|
|
|
strcpy(addr_out, socks_prot == socks4 ? tmpbuf : startaddr);
|
2003-09-25 07:17:11 +02:00
|
|
|
buf->datalen -= (next-buf->buf+1); /* next points to the final \0 on inbuf */
|
|
|
|
memmove(buf->buf, next+1, buf->datalen);
|
2003-09-18 10:11:31 +02:00
|
|
|
// log_fn(LOG_DEBUG,"buf_datalen is now %d:'%s'",*buf_datalen,buf);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2003-04-07 04:12:02 +02:00
|
|
|
/*
|
|
|
|
Local Variables:
|
|
|
|
mode:c
|
|
|
|
indent-tabs-mode:nil
|
|
|
|
c-basic-offset:2
|
|
|
|
End:
|
|
|
|
*/
|