/* Copyright (c) 2017 - 2020 LiteSpeed Technologies Inc.  See LICENSE. */
/*
 * http_client.c -- A simple HTTP/QUIC client
 */

#ifndef WIN32
#include <arpa/inet.h>
#include <netinet/in.h>
#else
#include <Windows.h>
#include <WinSock2.h>
#include <io.h>
#include <stdlib.h>
#include <getopt.h>
#define STDOUT_FILENO 1
#define random rand
#pragma warning(disable:4996) //POSIX name deprecated
#endif
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#ifndef WIN32
#include <unistd.h>
#include <sys/types.h>
#include <dirent.h>
#include <limits.h>
#endif
#include <sys/stat.h>
#include <fcntl.h>
#include <event2/event.h>
#include <math.h>

#include <openssl/bio.h>
#include <openssl/pem.h>
#include <openssl/x509.h>

#include "lsquic.h"
#include "test_common.h"
#include "prog.h"

#include "../src/liblsquic/lsquic_logger.h"
#include "../src/liblsquic/lsquic_int_types.h"
#include "../src/liblsquic/lsquic_util.h"
/* include directly for reset_stream testing */
#include "../src/liblsquic/lsquic_varint.h"
#include "../src/liblsquic/lsquic_hq.h"
#include "../src/liblsquic/lsquic_sfcw.h"
#include "../src/liblsquic/lsquic_hash.h"
#include "../src/liblsquic/lsquic_stream.h"
/* include directly for retire_cid testing */
#include "../src/liblsquic/lsquic_conn.h"
#include "lsxpack_header.h"

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* This is used to exercise generating and sending of priority frames */
static int randomly_reprioritize_streams;

static int s_display_cert_chain;

/* If this file descriptor is open, the client will accept server push and
 * dump the contents here.  See -u flag.
 */
static int promise_fd = -1;

/* Set to true value to use header bypass.  This means that the user code
 * creates header set via callbacks and then fetches it by calling
 * lsquic_stream_get_hset() when the first "on_read" event is called.
 */
static int g_header_bypass;

static int s_discard_response;

/* If set to a non-zero value, abandon reading from stream early: read at
 * most `s_abandon_early' bytes and then close the stream.
 */
static long s_abandon_early;

struct sample_stats
{
    unsigned        n;
    unsigned long   min, max;
    unsigned long   sum;        /* To calculate mean */
    unsigned long   sum_X2;     /* To calculate stddev */
};

static struct sample_stats  s_stat_to_conn,     /* Time to connect */
                            s_stat_ttfb,
                            s_stat_req;         /* From TTFB to EOS */
static unsigned s_stat_conns_ok, s_stat_conns_failed;
static unsigned long s_stat_downloaded_bytes;

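/* Record one sample: track min/max and the running sums from which the
 * mean and standard deviation are later derived.
 */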
static void
update_sample_stats (struct sample_stats *stats, unsigned long val)
{
    LSQ_DEBUG("%s: %p: %lu", __func__, stats, val);
    if (stats->n)
    {
        if (val < stats->min)
            stats->min = val;
        else if (val > stats->max)
            stats->max = val;
    }
    else
    {
        stats->min = val;
        stats->max = val;
    }
    stats->sum += val;
    stats->sum_X2 += val * val;
    ++stats->n;
}


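/* Derive the summary statistics from the accumulated sums:
 *   mean   = sum / n
 *   stddev = sqrt((sum_X2 - n * mean^2) / (n - 1))
 * For example, samples {2, 4, 6} give sum = 12, sum_X2 = 56, mean = 4,
 * and stddev = sqrt((56 - 3 * 16) / 2) = 2.
 */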
static void
calc_sample_stats (const struct sample_stats *stats,
                        long double *mean_p, long double *stddev_p)
{
    unsigned long mean, tmp;

    if (stats->n)
    {
        mean = stats->sum / stats->n;
        *mean_p = (long double) mean;
        if (stats->n > 1)
        {
            tmp = stats->sum_X2 - stats->n * mean * mean;
            tmp /= stats->n - 1;
            *stddev_p = sqrtl((long double) tmp);
        }
        else
            *stddev_p = 0;
    }
    else
    {
        *mean_p = 0;
        *stddev_p = 0;
    }
}


/* When more than `nread' bytes are read from stream `stream_id', apply
 * priority in `ehp'.
 */
struct priority_spec
{
    enum {
        PRIORITY_SPEC_ACTIVE    = 1 << 0,
    }                           flags;
    lsquic_stream_id_t          stream_id;
    size_t                      nread;
    struct lsquic_ext_http_prio ehp;
};

static struct priority_spec *s_priority_specs;
static unsigned s_n_prio_specs;

static void
maybe_perform_priority_actions (struct lsquic_stream *, lsquic_stream_ctx_t *);

struct lsquic_conn_ctx;

struct path_elem {
    TAILQ_ENTRY(path_elem)      next_pe;
    const char                 *path;
};

struct http_client_ctx {
    const char                  *hostname;
    const char                  *method;
    const char                  *payload;
    char                         payload_size[20];

    /* hcc_path_elems holds a list of paths which are to be requested from
     * the server.  Each new request gets the next path from the list (the
     * iterator is stored in hcc_cur_pe); when the end is reached, the
     * iterator wraps around.
     */
    TAILQ_HEAD(, path_elem)      hcc_path_elems;
    struct path_elem            *hcc_cur_pe;

    unsigned                     hcc_total_n_reqs;
    unsigned                     hcc_reqs_per_conn;
    unsigned                     hcc_concurrency;
    unsigned                     hcc_cc_reqs_per_conn;
    unsigned                     hcc_n_open_conns;
    unsigned                     hcc_reset_after_nbytes;
    unsigned                     hcc_retire_cid_after_nbytes;

    char                        *hcc_sess_resume_file_name;

    enum {
        HCC_SKIP_SESS_RESUME    = (1 << 0),
        HCC_SEEN_FIN            = (1 << 1),
        HCC_ABORT_ON_INCOMPLETE = (1 << 2),
    }                            hcc_flags;
    struct prog                 *prog;
    const char                  *qif_file;
    FILE                        *qif_fh;
};

struct lsquic_conn_ctx {
    TAILQ_ENTRY(lsquic_conn_ctx) next_ch;
    lsquic_conn_t       *conn;
    struct http_client_ctx   *client_ctx;
    lsquic_time_t        ch_created;
    unsigned             ch_n_reqs;    /* This number gets decremented as streams are closed and
                                        * incremented as push promises are accepted.
                                        */
    unsigned             ch_n_cc_streams;   /* This number is incremented as streams are opened
                                             * and decremented as streams are closed. It should
                                             * never exceed hcc_cc_reqs_per_conn in client_ctx.
                                             */
    enum {
        CH_SESSION_RESUME_SAVED = 1 << 0,
    }                    ch_flags;
};


struct hset_elem
{
    STAILQ_ENTRY(hset_elem)     next;
    size_t                      nalloc;
    struct lsxpack_header       xhdr;
};


STAILQ_HEAD(hset, hset_elem);

static void
hset_dump (const struct hset *, FILE *);
static void
hset_destroy (void *hset);
static void
display_cert_chain (lsquic_conn_t *);

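/* Open new connections until either the concurrency limit is reached or
 * there are no requests left to assign.  If a session resumption file was
 * configured and resumption is not being skipped, its contents are passed
 * to prog_connect() so the handshake can attempt resumption.
 */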
static void
create_connections (struct http_client_ctx *client_ctx)
{
    size_t len;
    FILE *file;
    unsigned char sess_resume[0x2000];

    if (0 == (client_ctx->hcc_flags & HCC_SKIP_SESS_RESUME)
                                && client_ctx->hcc_sess_resume_file_name)
    {
        file = fopen(client_ctx->hcc_sess_resume_file_name, "rb");
        if (!file)
        {
            LSQ_DEBUG("cannot open %s for reading: %s",
                client_ctx->hcc_sess_resume_file_name, strerror(errno));
            goto no_file;
        }
        len = fread(sess_resume, 1, sizeof(sess_resume), file);
        if (0 == len && !feof(file))
            LSQ_WARN("error reading %s: %s",
                client_ctx->hcc_sess_resume_file_name, strerror(errno));
        fclose(file);
        LSQ_INFO("create connection sess_resume %zu bytes", len);
    }
    else no_file:
        len = 0;

    while (client_ctx->hcc_n_open_conns < client_ctx->hcc_concurrency &&
           client_ctx->hcc_total_n_reqs > 0)
        if (0 != prog_connect(client_ctx->prog, len ? sess_resume : NULL, len))
        {
            LSQ_ERROR("connection failed");
            exit(EXIT_FAILURE);
        }
}

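/* Open as many concurrent streams on the connection as the remaining
 * request budget and the per-connection concurrency limit allow.
 */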
static void
create_streams (struct http_client_ctx *client_ctx, lsquic_conn_ctx_t *conn_h)
{
    while (conn_h->ch_n_reqs - conn_h->ch_n_cc_streams &&
            conn_h->ch_n_cc_streams < client_ctx->hcc_cc_reqs_per_conn)
    {
        lsquic_conn_make_stream(conn_h->conn);
        conn_h->ch_n_cc_streams++;
    }
}

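/* "On new connection" callback: allocate the per-connection context, take
 * this connection's share of the outstanding requests, and, if any paths
 * were specified, start creating request streams right away.
 */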
static lsquic_conn_ctx_t *
http_client_on_new_conn (void *stream_if_ctx, lsquic_conn_t *conn)
{
    struct http_client_ctx *client_ctx = stream_if_ctx;
    lsquic_conn_ctx_t *conn_h = calloc(1, sizeof(*conn_h));
    conn_h->conn = conn;
    conn_h->client_ctx = client_ctx;
    conn_h->ch_n_reqs = MIN(client_ctx->hcc_total_n_reqs,
                                                client_ctx->hcc_reqs_per_conn);
    client_ctx->hcc_total_n_reqs -= conn_h->ch_n_reqs;
    ++conn_h->client_ctx->hcc_n_open_conns;
    if (!TAILQ_EMPTY(&client_ctx->hcc_path_elems))
        create_streams(client_ctx, conn_h);
    conn_h->ch_created = lsquic_time_now();
    return conn_h;
}

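/* Context for the one-shot libevent event scheduled from the "connection
 * closed" callback: new connections are created (or the engine is stopped)
 * on the next event loop iteration rather than from inside the callback.
 */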
struct create_another_conn_or_stop_ctx
{
    struct event            *event;
    struct http_client_ctx  *client_ctx;
};


static void
create_another_conn_or_stop (evutil_socket_t sock, short events, void *ctx)
{
    struct create_another_conn_or_stop_ctx *const cacos = ctx;
    struct http_client_ctx *const client_ctx = cacos->client_ctx;

    event_del(cacos->event);
    event_free(cacos->event);
    free(cacos);

    create_connections(client_ctx);
    if (0 == client_ctx->hcc_n_open_conns)
    {
        LSQ_INFO("All connections are closed: stop engine");
        prog_stop(client_ctx->prog);
    }
}


static void
http_client_on_conn_closed (lsquic_conn_t *conn)
{
    lsquic_conn_ctx_t *conn_h = lsquic_conn_get_ctx(conn);
    struct create_another_conn_or_stop_ctx *cacos;
    enum LSQUIC_CONN_STATUS status;
    struct event_base *eb;
    char errmsg[80];

    status = lsquic_conn_status(conn, errmsg, sizeof(errmsg));
    LSQ_INFO("Connection closed.  Status: %d.  Message: %s", status,
        errmsg[0] ? errmsg : "<not set>");
    if (conn_h->client_ctx->hcc_flags & HCC_ABORT_ON_INCOMPLETE)
    {
        if (!(conn_h->client_ctx->hcc_flags & HCC_SEEN_FIN))
            abort();
    }
    --conn_h->client_ctx->hcc_n_open_conns;

    cacos = calloc(1, sizeof(*cacos));
    if (!cacos)
    {
        LSQ_ERROR("cannot allocate cacos");
        exit(1);
    }
    eb = prog_eb(conn_h->client_ctx->prog);
    cacos->client_ctx = conn_h->client_ctx;
    cacos->event = event_new(eb, -1, 0, create_another_conn_or_stop, cacos);
    if (!cacos->event)
    {
        LSQ_ERROR("cannot allocate event");
        exit(1);
    }
    if (0 != event_add(cacos->event, NULL))
    {
        LSQ_ERROR("cannot add cacos event");
        exit(1);
    }
    event_active(cacos->event, 0, 0);

    free(conn_h);
}

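/* Both a fresh handshake and a successfully resumed one count as success. */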
static int
hsk_status_ok (enum lsquic_hsk_status status)
{
    return status == LSQ_HSK_OK || status == LSQ_HSK_RESUMED_OK;
}

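/* "Handshake done" callback: log the outcome, update connection statistics,
 * and optionally dump the server certificate chain.  If session resumption
 * caused the failure, arrange for a retry without it.
 */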
static void
http_client_on_hsk_done (lsquic_conn_t *conn, enum lsquic_hsk_status status)
{
    lsquic_conn_ctx_t *conn_h = lsquic_conn_get_ctx(conn);
    struct http_client_ctx *client_ctx = conn_h->client_ctx;

    if (hsk_status_ok(status))
        LSQ_INFO("handshake success %s",
            status == LSQ_HSK_RESUMED_OK ? "(session resumed)" : "");
    else if (status == LSQ_HSK_FAIL)
        LSQ_INFO("handshake failed");
    else if (status == LSQ_HSK_RESUMED_FAIL)
    {
        LSQ_INFO("handshake failed because of session resumption, will retry "
                                                                "without it");
        client_ctx->hcc_flags |= HCC_SKIP_SESS_RESUME;
        ++client_ctx->hcc_concurrency;
        ++client_ctx->hcc_total_n_reqs;
    }
    else
        assert(0);

    if (hsk_status_ok(status) && s_display_cert_chain)
        display_cert_chain(conn);

    if (hsk_status_ok(status))
    {
        conn_h = lsquic_conn_get_ctx(conn);
        ++s_stat_conns_ok;
        update_sample_stats(&s_stat_to_conn,
                            lsquic_time_now() - conn_h->ch_created);
        if (TAILQ_EMPTY(&client_ctx->hcc_path_elems))
        {
            LSQ_INFO("no paths mode: close connection");
            lsquic_conn_close(conn_h->conn);
        }
    }
    else
        ++s_stat_conns_failed;
}

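/* Callback invoked when the library hands us session resumption information;
 * write it to the configured file so that a later run can resume the session.
 */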
static void
http_client_on_sess_resume_info (lsquic_conn_t *conn, const unsigned char *buf,
                                                                size_t bufsz)
{
    lsquic_conn_ctx_t *const conn_h = lsquic_conn_get_ctx(conn);
    struct http_client_ctx *const client_ctx = conn_h->client_ctx;
    FILE *file;
    size_t nw;

    assert(client_ctx->hcc_sess_resume_file_name);

    /* Our client is rather limited: only one file and only one ticket per
     * connection can be saved.
     */
    if (conn_h->ch_flags & CH_SESSION_RESUME_SAVED)
    {
        LSQ_DEBUG("session resumption information already saved for this "
                                                            "connection");
        return;
    }

    file = fopen(client_ctx->hcc_sess_resume_file_name, "wb");
    if (!file)
    {
        LSQ_WARN("cannot open %s for writing: %s",
            client_ctx->hcc_sess_resume_file_name, strerror(errno));
        return;
    }

    nw = fwrite(buf, 1, bufsz, file);
    if (nw == bufsz)
    {
        LSQ_DEBUG("wrote %zd bytes of session resumption information to %s",
            nw, client_ctx->hcc_sess_resume_file_name);
        conn_h->ch_flags |= CH_SESSION_RESUME_SAVED;
    }
    else
        LSQ_WARN("error: fwrite(%s) returns %zd instead of %zd: %s",
            client_ctx->hcc_sess_resume_file_name, nw, bufsz, strerror(errno));

    fclose(file);
}


struct lsquic_stream_ctx {
    lsquic_stream_t     *stream;
    struct http_client_ctx   *client_ctx;
    const char          *path;
    enum {
        HEADERS_SENT    = (1 << 0),
        PROCESSED_HEADERS = 1 << 1,
        ABANDON = 1 << 2,   /* Abandon reading from stream after sh_stop bytes
                             * have been read.
                             */
    }                    sh_flags;
    lsquic_time_t        sh_created;
    lsquic_time_t        sh_ttfb;
    size_t               sh_stop;   /* Stop after reading this many bytes if ABANDON is set */
    size_t               sh_nread;  /* Number of bytes read from stream using one of
                                     * lsquic_stream_read* functions.
                                     */
    unsigned             count;
    struct lsquic_reader reader;
};

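/* "On new stream" callback: refuse pushed streams, pick the next path to
 * request (wrapping around the path list), set up the payload reader if a
 * payload file was given, and register interest in writing the request.
 */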
static lsquic_stream_ctx_t *
http_client_on_new_stream (void *stream_if_ctx, lsquic_stream_t *stream)
{
    const int pushed = lsquic_stream_is_pushed(stream);

    if (pushed)
    {
        LSQ_INFO("not accepting server push");
        lsquic_stream_refuse_push(stream);
        return NULL;
    }

    lsquic_stream_ctx_t *st_h = calloc(1, sizeof(*st_h));
    st_h->stream = stream;
    st_h->client_ctx = stream_if_ctx;
    st_h->sh_created = lsquic_time_now();
    if (st_h->client_ctx->hcc_cur_pe)
    {
        st_h->client_ctx->hcc_cur_pe = TAILQ_NEXT(
                                st_h->client_ctx->hcc_cur_pe, next_pe);
        if (!st_h->client_ctx->hcc_cur_pe)  /* Wrap around */
            st_h->client_ctx->hcc_cur_pe =
                                TAILQ_FIRST(&st_h->client_ctx->hcc_path_elems);
    }
    else
        st_h->client_ctx->hcc_cur_pe = TAILQ_FIRST(
                                        &st_h->client_ctx->hcc_path_elems);
    st_h->path = st_h->client_ctx->hcc_cur_pe->path;
    if (st_h->client_ctx->payload)
    {
        st_h->reader.lsqr_read = test_reader_read;
        st_h->reader.lsqr_size = test_reader_size;
        st_h->reader.lsqr_ctx = create_lsquic_reader_ctx(st_h->client_ctx->payload);
        if (!st_h->reader.lsqr_ctx)
            exit(1);
    }
    else
        st_h->reader.lsqr_ctx = NULL;
    LSQ_INFO("created new stream, path: %s", st_h->path);
    lsquic_stream_wantwrite(stream, 1);
    if (randomly_reprioritize_streams)
    {
        if ((1 << lsquic_conn_quic_version(lsquic_stream_conn(stream)))
                                                    & LSQUIC_IETF_VERSIONS)
            lsquic_stream_set_http_prio(stream,
                &(struct lsquic_ext_http_prio){
                    .urgency = random() & 7,
                    .incremental = random() & 1,
                }
            );
        else
            lsquic_stream_set_priority(stream, 1 + (random() & 0xFF));
    }
    if (s_priority_specs)
        maybe_perform_priority_actions(stream, st_h);
    if (s_abandon_early)
    {
        st_h->sh_stop = random() % (s_abandon_early + 1);
        st_h->sh_flags |= ABANDON;
    }

    return st_h;
}

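/* Build the request header list (pseudo-headers first, then user-agent,
 * optional "priority" headers, and content-type/content-length when a
 * payload is present) and send it on the stream.
 */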
static void
send_headers (lsquic_stream_ctx_t *st_h)
{
    const char *hostname = st_h->client_ctx->hostname;
    struct header_buf hbuf;
    unsigned h_idx = 0;
    if (!hostname)
        hostname = st_h->client_ctx->prog->prog_hostname;
    hbuf.off = 0;
    struct lsxpack_header headers_arr[9];
#define V(v) (v), strlen(v)
    header_set_ptr(&headers_arr[h_idx++], &hbuf, V(":method"), V(st_h->client_ctx->method));
    header_set_ptr(&headers_arr[h_idx++], &hbuf, V(":scheme"), V("https"));
    header_set_ptr(&headers_arr[h_idx++], &hbuf, V(":path"), V(st_h->path));
    header_set_ptr(&headers_arr[h_idx++], &hbuf, V(":authority"), V(hostname));
    header_set_ptr(&headers_arr[h_idx++], &hbuf, V("user-agent"), V(st_h->client_ctx->prog->prog_settings.es_ua));
    if (randomly_reprioritize_streams)
    {
        char pfv[10];
        sprintf(pfv, "u=%ld", random() & 7);
        header_set_ptr(&headers_arr[h_idx++], &hbuf, V("priority"), V(pfv));
        if (random() & 1)
            sprintf(pfv, "i");
        else
            sprintf(pfv, "i=?0");
        header_set_ptr(&headers_arr[h_idx++], &hbuf, V("priority"), V(pfv));
    }
    if (st_h->client_ctx->payload)
    {
        header_set_ptr(&headers_arr[h_idx++], &hbuf, V("content-type"), V("application/octet-stream"));
        header_set_ptr(&headers_arr[h_idx++], &hbuf, V("content-length"), V( st_h->client_ctx->payload_size));
    }
    lsquic_http_headers_t headers = {
        .count = h_idx,
        .headers = headers_arr,
    };
    if (0 != lsquic_stream_send_headers(st_h->stream, &headers,
                                    st_h->client_ctx->payload == NULL))
    {
        LSQ_ERROR("cannot send headers: %s", strerror(errno));
        exit(1);
    }
}


/* This is here to exercise lsquic_conn_get_server_cert_chain() API */
static void
display_cert_chain (lsquic_conn_t *conn)
{
    STACK_OF(X509) *chain;
    X509_NAME *name;
    X509 *cert;
    unsigned i;
    char buf[100];

    chain = lsquic_conn_get_server_cert_chain(conn);
    if (!chain)
    {
        LSQ_WARN("could not get server certificate chain");
        return;
    }

    for (i = 0; i < sk_X509_num(chain); ++i)
    {
        cert = sk_X509_value(chain, i);
        name = X509_get_subject_name(cert);
        LSQ_INFO("cert #%u: name: %s", i,
                            X509_NAME_oneline(name, buf, sizeof(buf)));
        X509_free(cert);
    }

    sk_X509_free(chain);
}

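/* "On write" callback: send the request headers on the first invocation;
 * on subsequent invocations stream the payload (if any) until the reader
 * is drained, then shut down the write side and switch to reading.
 */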
static void
http_client_on_write (lsquic_stream_t *stream, lsquic_stream_ctx_t *st_h)
{
    ssize_t nw;

    if (st_h->sh_flags & HEADERS_SENT)
    {
        if (st_h->client_ctx->payload && test_reader_size(st_h->reader.lsqr_ctx) > 0)
        {
            nw = lsquic_stream_writef(stream, &st_h->reader);
            if (nw < 0)
            {
                LSQ_ERROR("write error: %s", strerror(errno));
                exit(1);
            }
            if (test_reader_size(st_h->reader.lsqr_ctx) > 0)
            {
                lsquic_stream_wantwrite(stream, 1);
            }
            else
            {
                lsquic_stream_shutdown(stream, 1);
                lsquic_stream_wantread(stream, 1);
            }
        }
        else
        {
            lsquic_stream_shutdown(stream, 1);
            lsquic_stream_wantread(stream, 1);
        }
    }
    else
    {
        st_h->sh_flags |= HEADERS_SENT;
        send_headers(st_h);
    }
}

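/* Read callback for lsquic_stream_readf() used when the response is being
 * discarded: consume up to `sz' bytes without copying them anywhere, capped
 * at the remaining ABANDON budget when early abandonment is enabled.
 */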
static size_t
discard (void *ctx, const unsigned char *buf, size_t sz, int fin)
{
    lsquic_stream_ctx_t *st_h = ctx;

    if (st_h->sh_flags & ABANDON)
    {
        if (sz > st_h->sh_stop - st_h->sh_nread)
            sz = st_h->sh_stop - st_h->sh_nread;
    }

    return sz;
}

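/* Apply any pending priority specs whose byte threshold this stream has
 * crossed; once no specs remain active, disable further checks.
 */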
static void
maybe_perform_priority_actions (struct lsquic_stream *stream,
                                                lsquic_stream_ctx_t *st_h)
{
    const lsquic_stream_id_t stream_id = lsquic_stream_id(stream);
    struct priority_spec *spec;
    unsigned n_active;
    int s;

    n_active = 0;
    for (spec = s_priority_specs; spec < s_priority_specs + s_n_prio_specs;
                                                                    ++spec)
    {
        if ((spec->flags & PRIORITY_SPEC_ACTIVE)
            && spec->stream_id == stream_id
            && st_h->sh_nread >= spec->nread)
        {
            s = lsquic_stream_set_http_prio(stream, &spec->ehp);
            if (s != 0)
            {
                LSQ_ERROR("could not apply priorities to stream %"PRIu64,
                    stream_id);
                exit(1);
            }
            spec->flags &= ~PRIORITY_SPEC_ACTIVE;
        }
        n_active += !!(spec->flags & PRIORITY_SPEC_ACTIVE);
    }

    if (n_active == 0)
        s_priority_specs = NULL;
}

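/* "On read" callback: pull headers (in header-bypass mode) or response data
 * from the stream, update TTFB and download statistics, optionally exercise
 * stream reset, CID retirement, and reprioritization, and shut down or close
 * the stream when the response ends or the ABANDON limit is reached.
 */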
static void
http_client_on_read (lsquic_stream_t *stream, lsquic_stream_ctx_t *st_h)
{
    struct http_client_ctx *const client_ctx = st_h->client_ctx;
    struct hset *hset;
    ssize_t nread;
    unsigned old_prio, new_prio;
    unsigned char buf[0x200];
    unsigned nreads = 0;
#ifdef WIN32
    srand(GetTickCount());
#endif

    do
    {
        if (g_header_bypass && !(st_h->sh_flags & PROCESSED_HEADERS))
        {
            hset = lsquic_stream_get_hset(stream);
            if (!hset)
            {
                LSQ_ERROR("could not get header set from stream");
                exit(2);
            }
            st_h->sh_ttfb = lsquic_time_now();
            update_sample_stats(&s_stat_ttfb, st_h->sh_ttfb - st_h->sh_created);
            if (s_discard_response)
                LSQ_DEBUG("discard response: do not dump headers");
            else
                hset_dump(hset, stdout);
            hset_destroy(hset);
            st_h->sh_flags |= PROCESSED_HEADERS;
        }
        else if (nread = (s_discard_response
                    ? lsquic_stream_readf(stream, discard, st_h)
                    : lsquic_stream_read(stream, buf,
                        st_h->sh_flags & ABANDON
                      ? MIN(sizeof(buf), st_h->sh_stop - st_h->sh_nread)
                      : sizeof(buf))),
                nread > 0)
        {
            st_h->sh_nread += (size_t) nread;
            s_stat_downloaded_bytes += nread;
            /* test stream_reset after some number of read bytes */
            if (client_ctx->hcc_reset_after_nbytes &&
                s_stat_downloaded_bytes > client_ctx->hcc_reset_after_nbytes)
            {
                lsquic_stream_reset(stream, 0x1);
                break;
            }
            /* test retire_cid after some number of read bytes */
            if (client_ctx->hcc_retire_cid_after_nbytes &&
                s_stat_downloaded_bytes > client_ctx->hcc_retire_cid_after_nbytes)
            {
                lsquic_conn_retire_cid(lsquic_stream_conn(stream));
                client_ctx->hcc_retire_cid_after_nbytes = 0;
                break;
            }
            if (!g_header_bypass && !(st_h->sh_flags & PROCESSED_HEADERS))
            {
                /* First read is assumed to be the first byte */
                st_h->sh_ttfb = lsquic_time_now();
                update_sample_stats(&s_stat_ttfb,
                                    st_h->sh_ttfb - st_h->sh_created);
                st_h->sh_flags |= PROCESSED_HEADERS;
            }
            if (!s_discard_response)
                fwrite(buf, 1, nread, stdout);
            if (randomly_reprioritize_streams && (st_h->count++ & 0x3F) == 0)
            {
                if ((1 << lsquic_conn_quic_version(lsquic_stream_conn(stream)))
                                                    & LSQUIC_IETF_VERSIONS)
                {
                    struct lsquic_ext_http_prio ehp;
                    if (0 == lsquic_stream_get_http_prio(stream, &ehp))
                    {
                        ehp.urgency = 7 & (ehp.urgency + 1);
                        ehp.incremental = !ehp.incremental;
#ifndef NDEBUG
                        const int s =
#endif
                        lsquic_stream_set_http_prio(stream, &ehp);
                        assert(s == 0);
                    }
                }
                else
                {
                    old_prio = lsquic_stream_priority(stream);
                    new_prio = 1 + (random() & 0xFF);
#ifndef NDEBUG
                    const int s =
#endif
                    lsquic_stream_set_priority(stream, new_prio);
                    assert(s == 0);
                    LSQ_DEBUG("changed stream %"PRIu64" priority from %u to %u",
                        lsquic_stream_id(stream), old_prio, new_prio);
                }
            }
            if (s_priority_specs)
                maybe_perform_priority_actions(stream, st_h);
            if ((st_h->sh_flags & ABANDON) && st_h->sh_nread >= st_h->sh_stop)
            {
                LSQ_DEBUG("closing stream early having read %zd bytes",
                                                            st_h->sh_nread);
                lsquic_stream_close(stream);
                break;
            }
        }
        else if (0 == nread)
        {
            update_sample_stats(&s_stat_req, lsquic_time_now() - st_h->sh_ttfb);
            client_ctx->hcc_flags |= HCC_SEEN_FIN;
            lsquic_stream_shutdown(stream, 0);
            break;
        }
        else if (client_ctx->prog->prog_settings.es_rw_once && EWOULDBLOCK == errno)
        {
            LSQ_NOTICE("emptied the buffer in 'once' mode");
            break;
        }
        else if (lsquic_stream_is_rejected(stream))
        {
            LSQ_NOTICE("stream was rejected");
            lsquic_stream_close(stream);
            break;
        }
        else
        {
            LSQ_ERROR("could not read: %s", strerror(errno));
            exit(2);
        }
    }
    while (client_ctx->prog->prog_settings.es_rw_once
            && nreads++ < 3 /* Emulate just a few reads */);
}

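/* "On close" callback: account for the finished request and either create
 * more streams on this connection or close it once all requests are done.
 */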
static void
|
|
|
|
http_client_on_close (lsquic_stream_t *stream, lsquic_stream_ctx_t *st_h)
|
|
|
|
{
|
|
|
|
const int pushed = lsquic_stream_is_pushed(stream);
|
|
|
|
if (pushed)
|
|
|
|
{
|
|
|
|
assert(NULL == st_h);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
LSQ_INFO("%s called", __func__);
|
2019-01-09 22:17:38 +00:00
|
|
|
struct http_client_ctx *const client_ctx = st_h->client_ctx;
|
2017-09-22 21:00:03 +00:00
|
|
|
lsquic_conn_t *conn = lsquic_stream_conn(stream);
|
2020-09-08 15:43:03 +00:00
|
|
|
lsquic_conn_ctx_t *const conn_h = lsquic_conn_get_ctx(conn);
|
2017-09-22 21:00:03 +00:00
|
|
|
--conn_h->ch_n_reqs;
|
2019-01-09 22:17:38 +00:00
|
|
|
--conn_h->ch_n_cc_streams;
|
2017-09-22 21:00:03 +00:00
|
|
|
if (0 == conn_h->ch_n_reqs)
|
|
|
|
{
|
|
|
|
LSQ_INFO("all requests completed, closing connection");
|
|
|
|
lsquic_conn_close(conn_h->conn);
|
|
|
|
}
|
|
|
|
else
|
2019-01-09 22:17:38 +00:00
|
|
|
{
|
|
|
|
LSQ_INFO("%u active stream, %u request remain, creating %u new stream",
|
|
|
|
conn_h->ch_n_cc_streams,
|
|
|
|
conn_h->ch_n_reqs - conn_h->ch_n_cc_streams,
|
|
|
|
MIN((conn_h->ch_n_reqs - conn_h->ch_n_cc_streams),
|
|
|
|
(client_ctx->hcc_cc_reqs_per_conn - conn_h->ch_n_cc_streams)));
|
|
|
|
create_streams(client_ctx, conn_h);
|
|
|
|
}
|
2017-10-31 13:35:58 +00:00
|
|
|
if (st_h->reader.lsqr_ctx)
|
|
|
|
destroy_lsquic_reader_ctx(st_h->reader.lsqr_ctx);
|
2017-09-22 21:00:03 +00:00
|
|
|
free(st_h);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-09-11 15:27:58 +00:00
|
|
|
static struct lsquic_stream_if http_client_if = {
|
2017-09-22 21:00:03 +00:00
|
|
|
.on_new_conn = http_client_on_new_conn,
|
|
|
|
.on_conn_closed = http_client_on_conn_closed,
|
|
|
|
.on_new_stream = http_client_on_new_stream,
|
|
|
|
.on_read = http_client_on_read,
|
|
|
|
.on_write = http_client_on_write,
|
|
|
|
.on_close = http_client_on_close,
|
2018-05-21 19:02:33 +00:00
|
|
|
.on_hsk_done = http_client_on_hsk_done,
|
2017-09-22 21:00:03 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
usage (const char *prog)
|
|
|
|
{
|
|
|
|
const char *const slash = strrchr(prog, '/');
|
|
|
|
if (slash)
|
|
|
|
prog = slash + 1;
|
|
|
|
printf(
|
|
|
|
"Usage: %s [opts]\n"
|
|
|
|
"\n"
|
|
|
|
"Options:\n"
|
2019-02-18 13:40:51 +00:00
|
|
|
" -p PATH Path to request. May be specified more than once. If no\n"
|
|
|
|
" path is specified, the connection is closed as soon as\n"
|
|
|
|
" handshake succeeds.\n"
|
2017-09-22 21:00:03 +00:00
|
|
|
" -n CONNS Number of concurrent connections. Defaults to 1.\n"
|
|
|
|
" -r NREQS Total number of requests to send. Defaults to 1.\n"
|
2019-01-09 22:17:38 +00:00
|
|
|
" -R MAXREQS Maximum number of requests per single connection. Some\n"
|
2017-09-22 21:00:03 +00:00
|
|
|
" connections will have fewer requests than this.\n"
|
2019-01-09 22:17:38 +00:00
|
|
|
" -w CONCUR Number of concurrent requests per single connection.\n"
|
|
|
|
" Defaults to 1.\n"
|
2019-09-11 15:27:58 +00:00
|
|
|
" -M METHOD Method. Defaults to GET.\n"
|
2017-09-22 21:00:03 +00:00
|
|
|
" -P PAYLOAD Name of the file that contains payload to be used in the\n"
|
|
|
|
" request. This adds two more headers to the request:\n"
|
|
|
|
" content-type: application/octet-stream and\n"
|
|
|
|
" content-length\n"
|
|
|
|
" -K Discard server response\n"
|
2018-02-26 21:01:16 +00:00
|
|
|
" -I Abort on incomplete reponse from server\n"
|
2018-05-16 19:48:43 +00:00
|
|
|
" -4 Prefer IPv4 when resolving hostname\n"
|
|
|
|
" -6 Prefer IPv6 when resolving hostname\n"
|
2019-02-04 13:59:11 +00:00
|
|
|
" -0 FILE Provide RTT info file (reading or writing)\n"
|
2018-08-17 15:44:54 +00:00
|
|
|
#ifndef WIN32
|
|
|
|
" -C DIR Certificate store. If specified, server certificate will\n"
|
|
|
|
" be verified.\n"
|
2019-01-09 22:17:38 +00:00
|
|
|
#endif
|
2019-01-16 20:13:59 +00:00
|
|
|
" -a Display server certificate chain after successful handshake.\n"
|
2019-09-11 15:27:58 +00:00
|
|
|
" -b N_BYTES Send RESET_STREAM frame after the client has read n bytes.\n"
|
2019-01-16 20:13:59 +00:00
|
|
|
" -t Print stats to stdout.\n"
|
|
|
|
" -T FILE Print stats to FILE. If FILE is -, print stats to stdout.\n"
|
2019-09-11 15:27:58 +00:00
|
|
|
" -q FILE QIF mode: issue requests from the QIF file and validate\n"
|
|
|
|
" server responses.\n"
|
2020-01-28 14:35:09 +00:00
|
|
|
" -e TOKEN Hexadecimal string representing resume token.\n"
|
2020-09-08 15:43:03 +00:00
|
|
|
" -3 MAX Close stream after reading at most MAX bytes. The actual\n"
|
|
|
|
" number of bytes read is randominzed.\n"
|
2020-10-07 13:41:26 +00:00
|
|
|
" -9 SPEC Priority specification. May be specified several times.\n"
|
|
|
|
" SPEC takes the form stream_id:nread:UI, where U is\n"
|
|
|
|
" urgency and I is incremental. Matched \\d+:\\d+:[0-7][01]\n"
|
2017-09-22 21:00:03 +00:00
|
|
|
, prog);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-08-17 15:44:54 +00:00
|
|
|
#ifndef WIN32
|
|
|
|
static X509_STORE *store;
|
|
|
|
|
|
|
|
/* Windows does not have regex... */
|
|
|
|
static int
|
|
|
|
ends_in_pem (const char *s)
|
|
|
|
{
|
|
|
|
int len;
|
|
|
|
|
|
|
|
len = strlen(s);
|
|
|
|
|
|
|
|
return len >= 4
|
|
|
|
&& 0 == strcasecmp(s + len - 4, ".pem");
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static X509 *
|
|
|
|
file2cert (const char *path)
|
|
|
|
{
|
|
|
|
X509 *cert = NULL;
|
|
|
|
BIO *in;
|
|
|
|
|
|
|
|
in = BIO_new(BIO_s_file());
|
|
|
|
if (!in)
|
|
|
|
goto end;
|
|
|
|
|
|
|
|
if (BIO_read_filename(in, path) <= 0)
|
|
|
|
goto end;
|
|
|
|
|
|
|
|
cert = PEM_read_bio_X509_AUX(in, NULL, NULL, NULL);
|
|
|
|
|
|
|
|
end:
|
|
|
|
BIO_free(in);
|
|
|
|
return cert;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
init_x509_cert_store (const char *path)
|
|
|
|
{
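    /* Load every *.pem file found in `path' into a fresh X509_STORE kept in
     * the file-scope `store' variable; it is reused for all subsequent
     * certificate verifications.
     */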
|
|
|
|
struct dirent *ent;
|
|
|
|
X509 *cert;
|
|
|
|
DIR *dir;
|
|
|
|
char file_path[NAME_MAX];
|
2019-02-04 13:59:11 +00:00
|
|
|
int ret;
|
2018-08-17 15:44:54 +00:00
|
|
|
|
|
|
|
dir = opendir(path);
|
|
|
|
if (!dir)
|
|
|
|
{
|
|
|
|
LSQ_WARN("Cannot open directory `%s': %s", path, strerror(errno));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
store = X509_STORE_new();
|
|
|
|
|
|
|
|
while ((ent = readdir(dir)))
|
|
|
|
{
|
|
|
|
if (ends_in_pem(ent->d_name))
|
|
|
|
{
|
2019-02-04 13:59:11 +00:00
|
|
|
ret = snprintf(file_path, sizeof(file_path), "%s/%s",
|
|
|
|
path, ent->d_name);
|
|
|
|
if (ret < 0)
|
|
|
|
{
|
|
|
|
LSQ_WARN("file_path formatting error %s", strerror(errno));
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
else if ((unsigned)ret >= sizeof(file_path))
|
|
|
|
{
|
|
|
|
LSQ_WARN("file_path was truncated %s", strerror(errno));
|
|
|
|
continue;
|
|
|
|
}
|
2018-08-17 15:44:54 +00:00
|
|
|
cert = file2cert(file_path);
|
|
|
|
if (cert)
|
|
|
|
{
|
|
|
|
if (1 != X509_STORE_add_cert(store, cert))
|
|
|
|
LSQ_WARN("could not add cert from %s", file_path);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
LSQ_WARN("could not read cert from %s", file_path);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
(void) closedir(dir);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
verify_server_cert (void *ctx, STACK_OF(X509) *chain)
|
|
|
|
{
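    /* Lazily initialize the store from the -C directory on first use, then
     * verify the leaf certificate (shifted off the front of the chain)
     * against it, passing the remaining certificates as the untrusted chain.
     */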
|
|
|
|
X509_STORE_CTX store_ctx;
|
|
|
|
X509 *cert;
|
|
|
|
int ver;
|
|
|
|
|
|
|
|
if (!store)
|
|
|
|
{
|
|
|
|
if (0 != init_x509_cert_store(ctx))
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
cert = sk_X509_shift(chain);
|
|
|
|
X509_STORE_CTX_init(&store_ctx, store, cert, chain);
|
|
|
|
|
|
|
|
ver = X509_verify_cert(&store_ctx);
|
|
|
|
|
|
|
|
X509_STORE_CTX_cleanup(&store_ctx);
|
|
|
|
|
|
|
|
if (ver != 1)
|
|
|
|
LSQ_WARN("could not verify server certificate");
|
|
|
|
|
|
|
|
return ver == 1 ? 0 : -1;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
2018-08-28 13:59:47 +00:00
|
|
|
static void *
|
2020-03-30 17:34:43 +00:00
|
|
|
hset_create (void *hsi_ctx, lsquic_stream_t *stream, int is_push_promise)
|
2018-08-28 13:59:47 +00:00
|
|
|
{
|
|
|
|
struct hset *hset;
|
|
|
|
|
2020-07-22 17:55:43 +00:00
|
|
|
if ((hset = malloc(sizeof(*hset))))
|
2018-08-28 13:59:47 +00:00
|
|
|
{
|
|
|
|
STAILQ_INIT(hset);
|
|
|
|
return hset;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-03-12 12:41:53 +00:00
|
|
|
static struct lsxpack_header *
|
|
|
|
hset_prepare_decode (void *hset_p, struct lsxpack_header *xhdr,
|
2020-03-13 15:24:36 +00:00
|
|
|
size_t req_space)
|
2018-08-28 13:59:47 +00:00
|
|
|
{
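    /* Called by the engine to obtain a decode buffer.  With a NULL xhdr a
     * new hset_elem is allocated and added to the set; otherwise the buffer
     * of the element handed out by a previous call is grown to req_space.
     */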
|
2020-03-12 12:41:53 +00:00
|
|
|
struct hset *const hset = hset_p;
|
2018-08-28 13:59:47 +00:00
|
|
|
struct hset_elem *el;
|
2020-03-30 17:34:43 +00:00
|
|
|
char *buf;
|
2018-08-28 13:59:47 +00:00
|
|
|
|
2020-03-13 15:24:36 +00:00
|
|
|
if (0 == req_space)
|
|
|
|
req_space = 0x100;
|
2020-03-12 12:41:53 +00:00
|
|
|
|
2020-03-13 15:24:36 +00:00
|
|
|
if (req_space > LSXPACK_MAX_STRLEN)
|
2020-03-12 12:41:53 +00:00
|
|
|
{
|
|
|
|
LSQ_WARN("requested space for header is too large: %zd bytes",
|
2020-03-13 15:24:36 +00:00
|
|
|
req_space);
|
2020-03-12 12:41:53 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!xhdr)
|
|
|
|
{
|
2020-03-30 17:34:43 +00:00
|
|
|
buf = malloc(req_space);
|
|
|
|
if (!buf)
|
|
|
|
{
|
|
|
|
LSQ_WARN("cannot allocate buf of %zd bytes", req_space);
|
|
|
|
return NULL;
|
|
|
|
}
|
2020-03-12 12:41:53 +00:00
|
|
|
el = malloc(sizeof(*el));
|
|
|
|
if (!el)
|
|
|
|
{
|
|
|
|
LSQ_WARN("cannot allocate hset_elem");
|
2020-03-30 17:34:43 +00:00
|
|
|
free(buf);
|
2020-03-12 12:41:53 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
STAILQ_INSERT_TAIL(hset, el, next);
|
2020-03-30 17:34:43 +00:00
|
|
|
lsxpack_header_prepare_decode(&el->xhdr, buf, 0, req_space);
|
2020-05-27 14:26:32 +00:00
|
|
|
el->nalloc = req_space;
|
2020-03-12 12:41:53 +00:00
|
|
|
}
|
2020-03-12 13:22:29 +00:00
|
|
|
else
|
2020-03-13 15:24:36 +00:00
|
|
|
{
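        /* xhdr is embedded in a struct hset_elem that was handed out
         * earlier; recover the containing element so its buffer can be
         * reallocated in place.
         */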
|
2020-03-12 13:22:29 +00:00
|
|
|
el = (struct hset_elem *) ((char *) xhdr
|
|
|
|
- offsetof(struct hset_elem, xhdr));
|
2020-05-27 14:26:32 +00:00
|
|
|
if (req_space <= el->nalloc)
|
2020-03-13 15:24:36 +00:00
|
|
|
{
|
|
|
|
LSQ_ERROR("requested space is smaller than already allocated");
|
|
|
|
return NULL;
|
|
|
|
}
|
2020-05-27 14:26:32 +00:00
|
|
|
if (req_space < el->nalloc * 2)
|
|
|
|
req_space = el->nalloc * 2;
|
2020-03-30 17:34:43 +00:00
|
|
|
buf = realloc(el->xhdr.buf, req_space);
|
|
|
|
if (!buf)
|
2020-03-12 12:41:53 +00:00
|
|
|
{
|
2020-03-30 17:34:43 +00:00
|
|
|
LSQ_WARN("cannot reallocate hset buf");
|
2020-03-12 12:41:53 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
2020-03-30 17:34:43 +00:00
|
|
|
el->xhdr.buf = buf;
|
|
|
|
el->xhdr.val_len = req_space;
|
2020-05-27 14:26:32 +00:00
|
|
|
el->nalloc = req_space;
|
2020-03-12 12:41:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return &el->xhdr;
|
|
|
|
}
|
2018-08-28 13:59:47 +00:00
|
|
|
|
|
|
|
|
2020-03-12 12:41:53 +00:00
|
|
|
static int
|
|
|
|
hset_add_header (void *hset_p, struct lsxpack_header *xhdr)
|
|
|
|
{
|
|
|
|
unsigned name_len, value_len;
|
|
|
|
/* Not much to do: the header name and value are already in xhdr */
|
|
|
|
|
|
|
|
if (xhdr)
|
2018-08-28 13:59:47 +00:00
|
|
|
{
|
2020-03-12 12:41:53 +00:00
|
|
|
name_len = xhdr->name_len;
|
|
|
|
value_len = xhdr->val_len;
|
|
|
|
s_stat_downloaded_bytes += name_len + value_len + 4; /* ": \r\n" */
|
2018-08-28 13:59:47 +00:00
|
|
|
}
|
2020-03-12 12:41:53 +00:00
|
|
|
else
|
|
|
|
s_stat_downloaded_bytes += 2; /* "\r\n" */
|
2018-08-28 13:59:47 +00:00
|
|
|
|
2020-03-12 12:41:53 +00:00
|
|
|
return 0;
|
2018-08-28 13:59:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
hset_destroy (void *hset_p)
|
|
|
|
{
|
|
|
|
struct hset *hset = hset_p;
|
|
|
|
struct hset_elem *el, *next;
|
|
|
|
|
2020-07-22 17:55:43 +00:00
|
|
|
for (el = STAILQ_FIRST(hset); el; el = next)
|
2018-08-28 13:59:47 +00:00
|
|
|
{
|
2020-07-22 17:55:43 +00:00
|
|
|
next = STAILQ_NEXT(el, next);
|
|
|
|
free(el->xhdr.buf);
|
|
|
|
free(el);
|
2018-08-28 13:59:47 +00:00
|
|
|
}
|
2020-07-22 17:55:43 +00:00
|
|
|
free(hset);
|
2018-08-28 13:59:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
hset_dump (const struct hset *hset, FILE *out)
|
|
|
|
{
|
|
|
|
const struct hset_elem *el;
|
|
|
|
|
|
|
|
STAILQ_FOREACH(el, hset, next)
|
2020-06-03 04:13:30 +00:00
|
|
|
if (el->xhdr.flags & (LSXPACK_HPACK_VAL_MATCHED|LSXPACK_QPACK_IDX))
|
2020-03-12 12:41:53 +00:00
|
|
|
fprintf(out, "%.*s (%s static table idx %u): %.*s\n",
|
|
|
|
(int) el->xhdr.name_len, lsxpack_header_get_name(&el->xhdr),
|
2020-06-03 04:13:30 +00:00
|
|
|
el->xhdr.flags & LSXPACK_HPACK_VAL_MATCHED ? "hpack" : "qpack",
|
|
|
|
el->xhdr.flags & LSXPACK_HPACK_VAL_MATCHED ? el->xhdr.hpack_index
|
2020-03-12 12:41:53 +00:00
|
|
|
: el->xhdr.qpack_index,
|
|
|
|
(int) el->xhdr.val_len, lsxpack_header_get_value(&el->xhdr));
|
2018-08-28 13:59:47 +00:00
|
|
|
else
|
2020-03-12 12:41:53 +00:00
|
|
|
fprintf(out, "%.*s: %.*s\n",
|
|
|
|
(int) el->xhdr.name_len, lsxpack_header_get_name(&el->xhdr),
|
|
|
|
(int) el->xhdr.val_len, lsxpack_header_get_value(&el->xhdr));
|
2018-08-28 13:59:47 +00:00
|
|
|
|
|
|
|
fprintf(out, "\n");
|
|
|
|
fflush(out);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* These are basic and for illustration purposes only. You will want to
|
|
|
|
* do your own verification by doing something similar to what is done
|
|
|
|
* in src/liblsquic/lsquic_http1x_if.c
|
|
|
|
*/
|
|
|
|
static const struct lsquic_hset_if header_bypass_api =
|
|
|
|
{
|
|
|
|
.hsi_create_header_set = hset_create,
|
2020-03-12 12:41:53 +00:00
|
|
|
.hsi_prepare_decode = hset_prepare_decode,
|
2018-08-28 13:59:47 +00:00
|
|
|
.hsi_process_header = hset_add_header,
|
|
|
|
.hsi_discard_header_set = hset_destroy,
|
|
|
|
};
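/* This header-bypass API is selected at run time: the -B option and QIF mode
 * in main() point prog.prog_api.ea_hsi_if at it, after which the engine
 * builds header sets through these callbacks and the reader fetches them
 * with lsquic_stream_get_hset() (see qif_client_on_read() below).
 */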
|
|
|
|
|
|
|
|
|
2019-01-16 20:13:59 +00:00
|
|
|
static void
|
|
|
|
display_stat (FILE *out, const struct sample_stats *stats, const char *name)
|
|
|
|
{
|
|
|
|
long double mean, stddev;
|
|
|
|
|
|
|
|
calc_sample_stats(stats, &mean, &stddev);
|
|
|
|
fprintf(out, "%s: n: %u; min: %.2Lf ms; max: %.2Lf ms; mean: %.2Lf ms; "
|
|
|
|
"sd: %.2Lf ms\n", name, stats->n, (long double) stats->min / 1000,
|
|
|
|
(long double) stats->max / 1000, mean / 1000, stddev / 1000);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-09-11 15:27:58 +00:00
|
|
|
static lsquic_conn_ctx_t *
|
|
|
|
qif_client_on_new_conn (void *stream_if_ctx, lsquic_conn_t *conn)
|
|
|
|
{
|
|
|
|
lsquic_conn_make_stream(conn);
|
|
|
|
return stream_if_ctx;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
qif_client_on_conn_closed (lsquic_conn_t *conn)
|
|
|
|
{
|
|
|
|
struct http_client_ctx *client_ctx = (void *) lsquic_conn_get_ctx(conn);
|
|
|
|
LSQ_INFO("connection is closed: stop engine");
|
|
|
|
prog_stop(client_ctx->prog);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
struct qif_stream_ctx
|
|
|
|
{
|
|
|
|
int reqno;
|
|
|
|
struct lsquic_http_headers headers;
|
|
|
|
char *qif_str;
|
|
|
|
size_t qif_sz;
|
|
|
|
size_t qif_off;
|
|
|
|
char *resp_str; /* qif_sz allocated */
|
|
|
|
size_t resp_off; /* Read so far */
|
|
|
|
enum {
|
|
|
|
QSC_HEADERS_SENT = 1 << 0,
|
|
|
|
QSC_GOT_HEADERS = 1 << 1,
|
|
|
|
} flags;
|
|
|
|
};
|
|
|
|
|
|
|
|
#define MAX(a, b) ((a) > (b) ? (a) : (b))
|
|
|
|
|
|
|
|
lsquic_stream_ctx_t *
|
|
|
|
qif_client_on_new_stream (void *stream_if_ctx, lsquic_stream_t *stream)
|
|
|
|
{
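    /* Read one request's worth of headers from the QIF file.  Each line is
     * expected to be "name<TAB>value"; lines starting with '#' are skipped
     * and an empty line ends the request.  The raw text is accumulated in
     * ctx->qif_str so the response body can later be compared against it
     * (see qif_client_on_read()).
     */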
|
|
|
|
struct http_client_ctx *const client_ctx = stream_if_ctx;
|
|
|
|
FILE *const fh = client_ctx->qif_fh;
|
|
|
|
struct qif_stream_ctx *ctx;
|
2020-03-30 17:34:43 +00:00
|
|
|
struct lsxpack_header *header;
|
2019-09-11 15:27:58 +00:00
|
|
|
static int reqno;
|
|
|
|
size_t nalloc;
|
|
|
|
char *end, *tab, *line;
|
|
|
|
char line_buf[0x1000];
|
|
|
|
|
|
|
|
ctx = calloc(1, sizeof(*ctx));
|
|
|
|
if (!ctx)
|
|
|
|
{
|
|
|
|
perror("calloc");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
ctx->reqno = reqno++;
|
|
|
|
|
|
|
|
nalloc = 0;
|
|
|
|
while ((line = fgets(line_buf, sizeof(line_buf), fh)))
|
|
|
|
{
|
|
|
|
end = strchr(line, '\n');
|
|
|
|
if (!end)
|
|
|
|
{
|
|
|
|
fprintf(stderr, "no newline\n");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (end == line)
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (*line == '#')
|
|
|
|
continue;
|
|
|
|
|
|
|
|
tab = strchr(line, '\t');
|
|
|
|
if (!tab)
|
|
|
|
{
|
|
|
|
fprintf(stderr, "no TAB\n");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nalloc + (end + 1 - line) > ctx->qif_sz)
|
|
|
|
{
|
|
|
|
if (nalloc)
|
|
|
|
nalloc = MAX(nalloc * 2, nalloc + (end + 1 - line));
|
|
|
|
else
|
|
|
|
nalloc = end + 1 - line;
|
|
|
|
ctx->qif_str = realloc(ctx->qif_str, nalloc);
|
|
|
|
if (!ctx->qif_str)
|
|
|
|
{
|
|
|
|
perror("realloc");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
memcpy(ctx->qif_str + ctx->qif_sz, line, end + 1 - line);
|
|
|
|
|
|
|
|
ctx->headers.headers = realloc(ctx->headers.headers,
|
|
|
|
sizeof(ctx->headers.headers[0]) * (ctx->headers.count + 1));
|
|
|
|
if (!ctx->headers.headers)
|
|
|
|
{
|
|
|
|
perror("realloc");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
header = &ctx->headers.headers[ctx->headers.count++];
|
2020-06-03 04:13:30 +00:00
|
|
|
lsxpack_header_set_offset2(header, ctx->qif_str + ctx->qif_sz, 0,
|
|
|
|
tab - line, tab - line + 1, end - tab - 1);
|
2019-09-11 15:27:58 +00:00
|
|
|
ctx->qif_sz += end + 1 - line;
|
|
|
|
}
|
|
|
|
|
|
|
|
lsquic_stream_wantwrite(stream, 1);
|
|
|
|
|
|
|
|
if (!line)
|
|
|
|
{
|
|
|
|
LSQ_DEBUG("Input QIF file ends; close file handle");
|
|
|
|
fclose(client_ctx->qif_fh);
|
|
|
|
client_ctx->qif_fh = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (void *) ctx;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
qif_client_on_write (struct lsquic_stream *stream, lsquic_stream_ctx_t *h)
|
|
|
|
{
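    /* The first invocation sends the headers; subsequent invocations write
     * the request body (the raw QIF text) until all of it has been sent,
     * at which point the write side is shut down and reading is enabled.
     */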
|
|
|
|
struct qif_stream_ctx *const ctx = (void *) h;
|
|
|
|
size_t towrite;
|
|
|
|
ssize_t nw;
|
|
|
|
|
|
|
|
if (ctx->flags & QSC_HEADERS_SENT)
|
|
|
|
{
|
|
|
|
towrite = ctx->qif_sz - ctx->qif_off;
|
|
|
|
nw = lsquic_stream_write(stream, ctx->qif_str + ctx->qif_off, towrite);
|
|
|
|
if (nw >= 0)
|
|
|
|
{
|
|
|
|
LSQ_DEBUG("wrote %zd bytes to stream", nw);
|
|
|
|
ctx->qif_off += nw;
|
|
|
|
if (ctx->qif_off == ctx->qif_sz)
|
|
|
|
{
|
|
|
|
lsquic_stream_shutdown(stream, 1);
|
|
|
|
lsquic_stream_wantread(stream, 1);
|
|
|
|
LSQ_DEBUG("finished writing request %d", ctx->reqno);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
LSQ_ERROR("cannot write to stream: %s", strerror(errno));
|
|
|
|
lsquic_stream_wantwrite(stream, 0);
|
|
|
|
lsquic_conn_abort(lsquic_stream_conn(stream));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
if (0 == lsquic_stream_send_headers(stream, &ctx->headers, 0))
|
|
|
|
{
|
|
|
|
ctx->flags |= QSC_HEADERS_SENT;
|
|
|
|
LSQ_DEBUG("sent headers");
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
LSQ_ERROR("cannot send headers: %s", strerror(errno));
|
|
|
|
lsquic_stream_wantwrite(stream, 0);
|
|
|
|
lsquic_conn_abort(lsquic_stream_conn(stream));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
qif_client_on_read (struct lsquic_stream *stream, lsquic_stream_ctx_t *h)
|
|
|
|
{
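    /* Response handling proceeds in three stages: fetch and dump the header
     * set, read up to qif_sz bytes of body, then collect EOF and compare
     * the body byte-for-byte against the request text that was sent.
     */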
|
|
|
|
struct qif_stream_ctx *const ctx = (void *) h;
|
|
|
|
struct hset *hset;
|
|
|
|
ssize_t nr;
|
|
|
|
unsigned char buf[1];
|
|
|
|
|
|
|
|
LSQ_DEBUG("reading response to request %d", ctx->reqno);
|
|
|
|
|
|
|
|
if (!(ctx->flags & QSC_GOT_HEADERS))
|
|
|
|
{
|
|
|
|
hset = lsquic_stream_get_hset(stream);
|
|
|
|
if (!hset)
|
|
|
|
{
|
|
|
|
LSQ_ERROR("could not get header set from stream");
|
|
|
|
exit(2);
|
|
|
|
}
|
|
|
|
LSQ_DEBUG("got header set for response %d", ctx->reqno);
|
|
|
|
hset_dump(hset, stdout);
|
|
|
|
hset_destroy(hset);
|
|
|
|
ctx->flags |= QSC_GOT_HEADERS;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
if (!ctx->resp_str)
|
|
|
|
{
|
|
|
|
ctx->resp_str = malloc(ctx->qif_sz);
|
|
|
|
if (!ctx->resp_str)
|
|
|
|
{
|
|
|
|
perror("malloc");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (ctx->resp_off < ctx->qif_sz)
|
|
|
|
{
|
|
|
|
nr = lsquic_stream_read(stream, ctx->resp_str + ctx->resp_off,
|
|
|
|
ctx->qif_sz - ctx->resp_off);
|
|
|
|
if (nr > 0)
|
|
|
|
{
|
|
|
|
ctx->resp_off += nr;
|
|
|
|
LSQ_DEBUG("read %zd bytes of reponse %d", nr, ctx->reqno);
|
|
|
|
}
|
|
|
|
else if (nr == 0)
|
|
|
|
{
|
|
|
|
LSQ_INFO("response %d too short", ctx->reqno);
|
|
|
|
LSQ_WARN("response %d FAIL", ctx->reqno);
|
|
|
|
lsquic_stream_shutdown(stream, 0);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
LSQ_ERROR("error reading from stream");
|
|
|
|
lsquic_stream_wantread(stream, 0);
|
|
|
|
lsquic_conn_abort(lsquic_stream_conn(stream));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Collect EOF */
|
|
|
|
nr = lsquic_stream_read(stream, buf, sizeof(buf));
|
|
|
|
if (nr == 0)
|
|
|
|
{
|
|
|
|
if (0 == memcmp(ctx->qif_str, ctx->resp_str, ctx->qif_sz))
|
|
|
|
LSQ_INFO("response %d OK", ctx->reqno);
|
|
|
|
else
|
|
|
|
LSQ_WARN("response %d FAIL", ctx->reqno);
|
|
|
|
lsquic_stream_shutdown(stream, 0);
|
|
|
|
}
|
|
|
|
else if (nr > 0)
|
|
|
|
{
|
|
|
|
LSQ_INFO("response %d too long", ctx->reqno);
|
|
|
|
LSQ_WARN("response %d FAIL", ctx->reqno);
|
|
|
|
lsquic_stream_shutdown(stream, 0);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
LSQ_ERROR("error reading from stream");
|
|
|
|
lsquic_stream_shutdown(stream, 0);
|
|
|
|
lsquic_conn_abort(lsquic_stream_conn(stream));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
qif_client_on_close (struct lsquic_stream *stream, lsquic_stream_ctx_t *h)
|
|
|
|
{
|
|
|
|
struct lsquic_conn *conn = lsquic_stream_conn(stream);
|
|
|
|
struct http_client_ctx *client_ctx = (void *) lsquic_conn_get_ctx(conn);
|
|
|
|
struct qif_stream_ctx *const ctx = (void *) h;
|
|
|
|
free(ctx->qif_str);
|
|
|
|
free(ctx->resp_str);
|
|
|
|
free(ctx->headers.headers);
|
|
|
|
free(ctx);
|
|
|
|
if (client_ctx->qif_fh)
|
|
|
|
lsquic_conn_make_stream(conn);
|
|
|
|
else
|
|
|
|
lsquic_conn_close(conn);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
const struct lsquic_stream_if qif_client_if = {
|
|
|
|
.on_new_conn = qif_client_on_new_conn,
|
|
|
|
.on_conn_closed = qif_client_on_conn_closed,
|
|
|
|
.on_new_stream = qif_client_on_new_stream,
|
|
|
|
.on_read = qif_client_on_read,
|
|
|
|
.on_write = qif_client_on_write,
|
|
|
|
.on_close = qif_client_on_close,
|
|
|
|
};
|
|
|
|
|
|
|
|
|
2017-09-22 21:00:03 +00:00
|
|
|
int
|
|
|
|
main (int argc, char **argv)
|
|
|
|
{
|
2019-09-11 15:27:58 +00:00
|
|
|
int opt, s, was_empty;
|
2019-01-16 20:13:59 +00:00
|
|
|
lsquic_time_t start_time;
|
|
|
|
FILE *stats_fh = NULL;
|
|
|
|
long double elapsed;
|
2017-09-22 21:00:03 +00:00
|
|
|
struct http_client_ctx client_ctx;
|
|
|
|
struct stat st;
|
|
|
|
struct path_elem *pe;
|
|
|
|
struct sport_head sports;
|
|
|
|
struct prog prog;
|
2019-09-11 15:27:58 +00:00
|
|
|
const char *token = NULL;
|
2020-10-07 13:41:26 +00:00
|
|
|
struct priority_spec *priority_specs = NULL;
|
2017-09-22 21:00:03 +00:00
|
|
|
|
|
|
|
TAILQ_INIT(&sports);
|
|
|
|
memset(&client_ctx, 0, sizeof(client_ctx));
|
|
|
|
TAILQ_INIT(&client_ctx.hcc_path_elems);
|
|
|
|
client_ctx.method = "GET";
|
|
|
|
client_ctx.hcc_concurrency = 1;
|
2019-01-09 22:17:38 +00:00
|
|
|
client_ctx.hcc_cc_reqs_per_conn = 1;
|
2017-09-22 21:00:03 +00:00
|
|
|
client_ctx.hcc_reqs_per_conn = 1;
|
|
|
|
client_ctx.hcc_total_n_reqs = 1;
|
2019-09-11 15:27:58 +00:00
|
|
|
client_ctx.hcc_reset_after_nbytes = 0;
|
|
|
|
client_ctx.hcc_retire_cid_after_nbytes = 0;
|
2017-09-22 21:00:03 +00:00
|
|
|
client_ctx.prog = &prog;
|
2018-03-12 22:25:01 +00:00
|
|
|
|
2017-09-22 21:00:03 +00:00
|
|
|
prog_init(&prog, LSENG_HTTP, &sports, &http_client_if, &client_ctx);
|
|
|
|
|
2019-09-11 15:27:58 +00:00
|
|
|
while (-1 != (opt = getopt(argc, argv, PROG_OPTS
|
|
|
|
"46Br:R:IKu:EP:M:n:w:H:p:0:q:e:hatT:b:d:"
|
2020-09-08 15:43:03 +00:00
|
|
|
"3:" /* 3 is 133+ for "e" ("e" for "early") */
|
2020-10-07 13:41:26 +00:00
|
|
|
"9:" /* 9 sort of looks like P... */
|
2018-08-17 15:44:54 +00:00
|
|
|
#ifndef WIN32
|
2019-09-11 15:27:58 +00:00
|
|
|
"C:"
|
2018-08-17 15:44:54 +00:00
|
|
|
#endif
|
|
|
|
)))
|
2017-09-22 21:00:03 +00:00
|
|
|
{
|
|
|
|
switch (opt) {
|
2019-01-16 20:13:59 +00:00
|
|
|
case 'a':
|
|
|
|
++s_display_cert_chain;
|
|
|
|
break;
|
2018-05-16 19:48:43 +00:00
|
|
|
case '4':
|
2018-05-16 16:14:02 +00:00
|
|
|
case '6':
|
2018-05-18 14:39:21 +00:00
|
|
|
prog.prog_ipver = opt - '0';
|
2018-05-16 16:14:02 +00:00
|
|
|
break;
|
2018-08-28 13:59:47 +00:00
|
|
|
case 'B':
|
|
|
|
g_header_bypass = 1;
|
|
|
|
prog.prog_api.ea_hsi_if = &header_bypass_api;
|
|
|
|
prog.prog_api.ea_hsi_ctx = NULL;
|
|
|
|
break;
|
2018-02-26 21:01:16 +00:00
|
|
|
case 'I':
|
|
|
|
client_ctx.hcc_flags |= HCC_ABORT_ON_INCOMPLETE;
|
|
|
|
break;
|
2017-09-22 21:00:03 +00:00
|
|
|
case 'K':
|
2019-01-16 20:13:59 +00:00
|
|
|
++s_discard_response;
|
2017-09-22 21:00:03 +00:00
|
|
|
break;
|
|
|
|
case 'u': /* Accept p<U>sh promise */
|
|
|
|
promise_fd = open(optarg, O_WRONLY|O_CREAT|O_TRUNC, 0644);
|
|
|
|
if (promise_fd < 0)
|
|
|
|
{
|
|
|
|
perror("open");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
prog.prog_settings.es_support_push = 1; /* Pokes into prog */
|
|
|
|
break;
|
|
|
|
case 'E': /* E: randomly reprioritize str<E>ams. Now, that's
|
|
|
|
* pretty random. :)
|
|
|
|
*/
|
2020-10-07 13:41:26 +00:00
|
|
|
srand((uintptr_t) argv);
|
2017-09-22 21:00:03 +00:00
|
|
|
randomly_reprioritize_streams = 1;
|
|
|
|
break;
|
|
|
|
case 'n':
|
|
|
|
client_ctx.hcc_concurrency = atoi(optarg);
|
|
|
|
break;
|
2019-01-09 22:17:38 +00:00
|
|
|
case 'w':
|
|
|
|
client_ctx.hcc_cc_reqs_per_conn = atoi(optarg);
|
|
|
|
break;
|
2017-09-22 21:00:03 +00:00
|
|
|
case 'P':
|
|
|
|
client_ctx.payload = optarg;
|
|
|
|
if (0 != stat(optarg, &st))
|
|
|
|
{
|
|
|
|
perror("stat");
|
|
|
|
exit(2);
|
|
|
|
}
|
|
|
|
sprintf(client_ctx.payload_size, "%jd", (intmax_t) st.st_size);
|
|
|
|
break;
|
|
|
|
case 'M':
|
|
|
|
client_ctx.method = optarg;
|
|
|
|
break;
|
|
|
|
case 'r':
|
|
|
|
client_ctx.hcc_total_n_reqs = atoi(optarg);
|
|
|
|
break;
|
|
|
|
case 'R':
|
|
|
|
client_ctx.hcc_reqs_per_conn = atoi(optarg);
|
|
|
|
break;
|
|
|
|
case 'H':
|
|
|
|
client_ctx.hostname = optarg;
|
2018-05-16 19:48:43 +00:00
|
|
|
prog.prog_hostname = optarg; /* Pokes into prog */
|
2017-09-22 21:00:03 +00:00
|
|
|
break;
|
|
|
|
case 'p':
|
|
|
|
pe = calloc(1, sizeof(*pe));
|
|
|
|
pe->path = optarg;
|
|
|
|
TAILQ_INSERT_TAIL(&client_ctx.hcc_path_elems, pe, next_pe);
|
|
|
|
break;
|
|
|
|
case 'h':
|
|
|
|
usage(argv[0]);
|
|
|
|
prog_print_common_options(&prog, stdout);
|
|
|
|
exit(0);
|
2019-09-11 15:27:58 +00:00
|
|
|
case 'q':
|
|
|
|
client_ctx.qif_file = optarg;
|
|
|
|
break;
|
|
|
|
case 'e':
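        /* If -e is given before any server address has been added, the
         * token cannot be attached to a port yet; remember it and apply it
         * after prog_prep() -- see the was_empty check further down.
         */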
|
|
|
|
if (TAILQ_EMPTY(&sports))
|
|
|
|
token = optarg;
|
|
|
|
else
|
|
|
|
sport_set_token(TAILQ_LAST(&sports, sport_head), optarg);
|
|
|
|
break;
|
2018-08-17 15:44:54 +00:00
|
|
|
#ifndef WIN32
|
|
|
|
case 'C':
|
|
|
|
prog.prog_api.ea_verify_cert = verify_server_cert;
|
|
|
|
prog.prog_api.ea_verify_ctx = optarg;
|
|
|
|
break;
|
|
|
|
#endif
|
2019-01-16 20:13:59 +00:00
|
|
|
case 't':
|
|
|
|
stats_fh = stdout;
|
|
|
|
break;
|
|
|
|
case 'T':
|
|
|
|
if (0 == strcmp(optarg, "-"))
|
|
|
|
stats_fh = stdout;
|
|
|
|
else
|
|
|
|
{
|
|
|
|
stats_fh = fopen(optarg, "w");
|
|
|
|
if (!stats_fh)
|
|
|
|
{
|
|
|
|
perror("fopen");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
2019-09-11 15:27:58 +00:00
|
|
|
case 'b':
|
|
|
|
client_ctx.hcc_reset_after_nbytes = atoi(optarg);
|
|
|
|
break;
|
|
|
|
case 'd':
|
|
|
|
client_ctx.hcc_retire_cid_after_nbytes = atoi(optarg);
|
|
|
|
break;
|
2019-02-04 13:59:11 +00:00
|
|
|
case '0':
|
2020-07-06 21:35:21 +00:00
|
|
|
http_client_if.on_sess_resume_info = http_client_on_sess_resume_info;
|
|
|
|
client_ctx.hcc_sess_resume_file_name = optarg;
|
2019-02-04 13:59:11 +00:00
|
|
|
break;
|
2020-09-08 15:43:03 +00:00
|
|
|
case '3':
|
|
|
|
s_abandon_early = strtol(optarg, NULL, 10);
|
|
|
|
break;
|
2020-10-07 13:41:26 +00:00
|
|
|
case '9':
|
|
|
|
{
|
|
|
|
/* Parse priority spec and tack it onto the end of the array */
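            /* For example (hypothetical values), "-9 8:4096:31" names
             * stream 8, nread of 4096 bytes, urgency 3, incremental 1;
             * the spec is presumably applied once that many bytes have
             * been read from the stream.
             */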
|
|
|
|
lsquic_stream_id_t stream_id;
|
|
|
|
size_t nread;
|
|
|
|
struct lsquic_ext_http_prio ehp;
|
|
|
|
struct priority_spec *new_specs;
|
|
|
|
stream_id = strtoull(optarg, &optarg, 10);
|
|
|
|
if (*optarg != ':')
|
|
|
|
exit(1);
|
|
|
|
++optarg;
|
|
|
|
nread = strtoull(optarg, &optarg, 10);
|
|
|
|
if (*optarg != ':')
|
|
|
|
exit(1);
|
|
|
|
++optarg;
|
|
|
|
if (!(*optarg >= '0' && *optarg <= '7'))
|
|
|
|
exit(1);
|
|
|
|
ehp.urgency = *optarg++ - '0';
|
|
|
|
if (!(*optarg >= '0' && *optarg <= '1'))
|
|
|
|
exit(1);
|
|
|
|
ehp.incremental = *optarg++ - '0';
|
|
|
|
++s_n_prio_specs;
|
|
|
|
new_specs = realloc(priority_specs,
|
|
|
|
sizeof(priority_specs[0]) * s_n_prio_specs);
|
|
|
|
if (!new_specs)
|
|
|
|
{
|
|
|
|
perror("malloc");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
priority_specs = new_specs;
|
|
|
|
priority_specs[s_n_prio_specs - 1] = (struct priority_spec) {
|
|
|
|
.flags = PRIORITY_SPEC_ACTIVE,
|
|
|
|
.stream_id = stream_id,
|
|
|
|
.nread = nread,
|
|
|
|
.ehp = ehp,
|
|
|
|
};
|
|
|
|
s_priority_specs = priority_specs;
|
|
|
|
break;
|
|
|
|
}
|
2017-09-22 21:00:03 +00:00
|
|
|
default:
|
|
|
|
if (0 != prog_set_opt(&prog, opt, optarg))
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-01-16 20:13:59 +00:00
|
|
|
#if LSQUIC_CONN_STATS
|
|
|
|
prog.prog_api.ea_stats_fh = stats_fh;
|
|
|
|
#endif
|
2019-10-11 12:24:24 +00:00
|
|
|
prog.prog_settings.es_ua = LITESPEED_ID;
|
2019-01-16 20:13:59 +00:00
|
|
|
|
2019-09-11 15:27:58 +00:00
|
|
|
if (client_ctx.qif_file)
|
2019-02-04 13:59:11 +00:00
|
|
|
{
|
2019-09-11 15:27:58 +00:00
|
|
|
client_ctx.qif_fh = fopen(client_ctx.qif_file, "r");
|
|
|
|
if (!client_ctx.qif_fh)
|
2019-02-04 13:59:11 +00:00
|
|
|
{
|
2019-09-11 15:27:58 +00:00
|
|
|
fprintf(stderr, "Cannot open %s for reading: %s\n",
|
|
|
|
client_ctx.qif_file, strerror(errno));
|
|
|
|
exit(1);
|
2019-02-04 13:59:11 +00:00
|
|
|
}
|
2019-09-11 15:27:58 +00:00
|
|
|
LSQ_NOTICE("opened QIF file %s for reading\n", client_ctx.qif_file);
|
|
|
|
prog.prog_api.ea_stream_if = &qif_client_if;
|
|
|
|
g_header_bypass = 1;
|
|
|
|
prog.prog_api.ea_hsi_if = &header_bypass_api;
|
|
|
|
prog.prog_api.ea_hsi_ctx = NULL;
|
|
|
|
}
|
|
|
|
else if (TAILQ_EMPTY(&client_ctx.hcc_path_elems))
|
|
|
|
{
|
|
|
|
fprintf(stderr, "Specify at least one path using -p option\n");
|
|
|
|
exit(1);
|
2019-02-04 13:59:11 +00:00
|
|
|
}
|
2017-09-22 21:00:03 +00:00
|
|
|
|
2019-01-16 20:13:59 +00:00
|
|
|
start_time = lsquic_time_now();
|
2019-09-11 15:27:58 +00:00
|
|
|
was_empty = TAILQ_EMPTY(&sports);
|
2017-09-22 21:00:03 +00:00
|
|
|
if (0 != prog_prep(&prog))
|
|
|
|
{
|
|
|
|
LSQ_ERROR("could not prep");
|
|
|
|
exit(EXIT_FAILURE);
|
|
|
|
}
|
2019-10-15 21:02:21 +00:00
|
|
|
if (!(client_ctx.hostname || prog.prog_hostname))
|
|
|
|
{
|
|
|
|
fprintf(stderr, "Specify hostname (used for SNI and :authority) via "
|
|
|
|
"-H option\n");
|
|
|
|
exit(EXIT_FAILURE);
|
|
|
|
}
|
2019-09-11 15:27:58 +00:00
|
|
|
if (was_empty && token)
|
|
|
|
sport_set_token(TAILQ_LAST(&sports, sport_head), token);
|
2017-09-22 21:00:03 +00:00
|
|
|
|
2019-09-11 15:27:58 +00:00
|
|
|
if (client_ctx.qif_file)
|
|
|
|
{
|
|
|
|
if (0 != prog_connect(&prog, NULL, 0))
|
|
|
|
{
|
|
|
|
LSQ_ERROR("connection failed");
|
|
|
|
exit(EXIT_FAILURE);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
create_connections(&client_ctx);
|
2017-09-22 21:00:03 +00:00
|
|
|
|
|
|
|
LSQ_DEBUG("entering event loop");
|
|
|
|
|
|
|
|
s = prog_run(&prog);
|
2019-01-16 20:13:59 +00:00
|
|
|
|
|
|
|
if (stats_fh)
|
|
|
|
{
|
|
|
|
elapsed = (long double) (lsquic_time_now() - start_time) / 1000000;
|
|
|
|
fprintf(stats_fh, "overall statistics as calculated by %s:\n", argv[0]);
|
|
|
|
display_stat(stats_fh, &s_stat_to_conn, "time for connect");
|
|
|
|
display_stat(stats_fh, &s_stat_req, "time for request");
|
|
|
|
display_stat(stats_fh, &s_stat_ttfb, "time to 1st byte");
|
|
|
|
fprintf(stats_fh, "downloaded %lu application bytes in %.3Lf seconds\n",
|
|
|
|
s_stat_downloaded_bytes, elapsed);
|
|
|
|
fprintf(stats_fh, "%.2Lf reqs/sec; %.0Lf bytes/sec\n",
|
|
|
|
(long double) s_stat_req.n / elapsed,
|
|
|
|
(long double) s_stat_downloaded_bytes / elapsed);
|
2019-01-28 20:41:28 +00:00
|
|
|
fprintf(stats_fh, "read handler count %lu\n", prog.prog_read_count);
|
2019-01-16 20:13:59 +00:00
|
|
|
}
|
|
|
|
|
2017-09-22 21:00:03 +00:00
|
|
|
prog_cleanup(&prog);
|
|
|
|
if (promise_fd >= 0)
|
|
|
|
(void) close(promise_fd);
|
|
|
|
|
2018-08-27 17:55:06 +00:00
|
|
|
while ((pe = TAILQ_FIRST(&client_ctx.hcc_path_elems)))
|
|
|
|
{
|
|
|
|
TAILQ_REMOVE(&client_ctx.hcc_path_elems, pe, next_pe);
|
|
|
|
free(pe);
|
|
|
|
}
|
|
|
|
|
2019-09-11 15:27:58 +00:00
|
|
|
if (client_ctx.qif_fh)
|
|
|
|
(void) fclose(client_ctx.qif_fh);
|
|
|
|
|
2020-10-07 13:41:26 +00:00
|
|
|
free(priority_specs);
|
2017-09-22 21:00:03 +00:00
|
|
|
exit(0 == s ? EXIT_SUCCESS : EXIT_FAILURE);
|
|
|
|
}
|