Release 2.24.5

- [FEATURE] Improve Delayed ACKs extension and turn it on by default.
- Limit receive history to a finite amount of memory.
Dmitri Tikhonov 2020-11-24 08:51:36 -05:00
parent 8e6b1576b4
commit f38b395a31
21 changed files with 525 additions and 113 deletions
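
Per the first release note, the Delayed ACKs extension now defaults to on. For context, here is a minimal sketch of how an application could opt back out through the engine settings; it assumes the usual lsquic setup path (lsquic_engine_init_settings(), an lsquic_engine_api whose ea_settings points at the settings struct, lsquic_engine_new()) and omits all other engine plumbing:

#include "lsquic.h"

/* Sketch: create an engine with Delayed ACKs explicitly disabled.
 * Everything except the settings plumbing is left out; error handling
 * and the rest of the lsquic_engine_api setup are the caller's job.
 */
static lsquic_engine_t *
make_engine_no_delayed_acks (struct lsquic_engine_api *api)
{
    /* static so the pointer stored in ea_settings stays valid */
    static struct lsquic_engine_settings settings;

    lsquic_engine_init_settings(&settings, 0);  /* 0: client-mode defaults */
    settings.es_delayed_acks = 0;               /* extension is on by default now */
    api->ea_settings = &settings;
    return lsquic_engine_new(0, api);
}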

View file

@ -48,6 +48,7 @@ const char *const lsquic_alid2str[] =
[AL_SESS_TICKET] = "SESS_TICKET",
[AL_BLOCKED_KA] = "BLOCKED_KA",
[AL_MTU_PROBE] = "MTU_PROBE",
[AL_PACK_TOL] = "PACK_TOL",
};

View file

@ -38,6 +38,7 @@ enum alarm_id {
AL_PATH_CHAL_3,
AL_SESS_TICKET,
AL_BLOCKED_KA, /* Blocked Keep-Alive */
AL_PACK_TOL, /* Calculate packet tolerance */
MAX_LSQUIC_ALARMS
};
@ -59,6 +60,7 @@ enum alarm_id_bit {
ALBIT_SESS_TICKET = 1 << AL_SESS_TICKET,
ALBIT_BLOCKED_KA = 1 << AL_BLOCKED_KA,
ALBIT_MTU_PROBE = 1 << AL_MTU_PROBE,
ALBIT_PACK_TOL = 1 << AL_PACK_TOL,
};
@ -92,6 +94,9 @@ lsquic_alarmset_init_alarm (lsquic_alarmset_t *, enum alarm_id,
#define lsquic_alarmset_are_set(alarmset, flags) \
((alarmset)->as_armed_set & (flags))
#define lsquic_alarmset_is_inited(alarmset_, al_id_) ( \
(alarmset_)->as_alarms[al_id_].callback)
/* Timers "fire," alarms "ring." */
void
lsquic_alarmset_ring_expired (lsquic_alarmset_t *, lsquic_time_t now);

View file

@ -647,8 +647,10 @@ gen_trans_params (struct enc_sess_iquic *enc_sess, unsigned char *buf,
}
if (settings->es_delayed_acks)
{
params.tp_numerics[TPI_MIN_ACK_DELAY] = 10000; /* TODO: make into a constant? make configurable? */
params.tp_numerics[TPI_MIN_ACK_DELAY] = TP_MIN_ACK_DELAY;
params.tp_set |= 1 << TPI_MIN_ACK_DELAY;
params.tp_numerics[TPI_MIN_ACK_DELAY_02] = TP_MIN_ACK_DELAY;
params.tp_set |= 1 << TPI_MIN_ACK_DELAY_02;
}
if (settings->es_timestamps)
{
@ -1687,6 +1689,17 @@ get_peer_transport_params (struct enc_sess_iquic *enc_sess)
}
LSQ_DEBUG("have peer transport parameters (%zu bytes)", bufsz);
if (LSQ_LOG_ENABLED(LSQ_LOG_DEBUG))
{
params_str = lsquic_mm_get_4k(&enc_sess->esi_enpub->enp_mm);
if (params_str)
{
lsquic_hexdump(params_buf, bufsz, params_str, 0x1000);
LSQ_DEBUG("transport parameters (%zd bytes):\n%s", bufsz,
params_str);
lsquic_mm_put_4k(&enc_sess->esi_enpub->enp_mm, params_str);
}
}
if (0 > (version == LSQVER_ID27 ? lsquic_tp_decode_27
: lsquic_tp_decode)(params_buf, bufsz,
!(enc_sess->esi_flags & ESI_SERVER),
@ -3343,8 +3356,10 @@ lsquic_enc_sess_ietf_gen_quic_ctx (
}
if (settings->es_delayed_acks)
{
params.tp_numerics[TPI_MIN_ACK_DELAY] = 10000; /* TODO: make into a constant? make configurable? */
params.tp_numerics[TPI_MIN_ACK_DELAY] = TP_MIN_ACK_DELAY;
params.tp_set |= 1 << TPI_MIN_ACK_DELAY;
params.tp_numerics[TPI_MIN_ACK_DELAY_02] = TP_MIN_ACK_DELAY;
params.tp_set |= 1 << TPI_MIN_ACK_DELAY_02;
}
if (settings->es_timestamps)
{

View file

@ -382,6 +382,14 @@ lsquic_engine_init_settings (struct lsquic_engine_settings *settings,
settings->es_cc_rtt_thresh = LSQUIC_DF_CC_RTT_THRESH;
settings->es_optimistic_nat = LSQUIC_DF_OPTIMISTIC_NAT;
settings->es_ext_http_prio = LSQUIC_DF_EXT_HTTP_PRIO;
settings->es_ptpc_periodicity= LSQUIC_DF_PTPC_PERIODICITY;
settings->es_ptpc_max_packtol= LSQUIC_DF_PTPC_MAX_PACKTOL;
settings->es_ptpc_dyn_target = LSQUIC_DF_PTPC_DYN_TARGET;
settings->es_ptpc_target = LSQUIC_DF_PTPC_TARGET;
settings->es_ptpc_prop_gain = LSQUIC_DF_PTPC_PROP_GAIN;
settings->es_ptpc_int_gain = LSQUIC_DF_PTPC_INT_GAIN;
settings->es_ptpc_err_thresh = LSQUIC_DF_PTPC_ERR_THRESH;
settings->es_ptpc_err_divisor= LSQUIC_DF_PTPC_ERR_DIVISOR;
}

View file

@ -81,6 +81,9 @@ enum stream_if { STREAM_IF_STD, STREAM_IF_HSK, STREAM_IF_HDR, N_STREAM_IFS };
#define MAX_RETR_PACKETS_SINCE_LAST_ACK 2
#define ACK_TIMEOUT 25000
/* Maximum number of ACK ranges that can fit into gQUIC ACK frame */
#define MAX_ACK_RANGES 256
/* IMPORTANT: Keep values of FC_SERVER and FC_HTTP same as LSENG_SERVER
* and LSENG_HTTP.
*/
@ -661,7 +664,7 @@ new_conn_common (lsquic_cid_t cid, struct lsquic_engine_public *enpub,
conn->fc_pub.all_streams = lsquic_hash_create();
if (!conn->fc_pub.all_streams)
goto cleanup_on_error;
lsquic_rechist_init(&conn->fc_rechist, 0);
lsquic_rechist_init(&conn->fc_rechist, 0, MAX_ACK_RANGES);
if (conn->fc_flags & FC_HTTP)
{
conn->fc_pub.u.gquic.hs = lsquic_headers_stream_new(
@ -4411,7 +4414,7 @@ lsquic_gquic_full_conn_srej (struct lsquic_conn *lconn)
/* Reset receive history */
lsquic_rechist_cleanup(&conn->fc_rechist);
lsquic_rechist_init(&conn->fc_rechist, 0);
lsquic_rechist_init(&conn->fc_rechist, 0, MAX_ACK_RANGES);
/* Reset send controller state */
lsquic_send_ctl_cleanup(&conn->fc_send_ctl);

View file

@ -6,6 +6,7 @@
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
@ -101,6 +102,7 @@
#define CLIENT_PUSH_SUPPORT 0
/* IMPORTANT: Keep values of IFC_SERVER and IFC_HTTP same as LSENG_SERVER
* and LSENG_HTTP.
*/
@ -336,11 +338,11 @@ struct conn_path
};
struct inc_ack_stats /* Incoming ACK stats */
struct packet_tolerance_stats
{
unsigned n_acks; /* Number of ACKs between ticks */
float avg_acked; /* Packets acked between ticks */
float avg_n_acks; /* Average number of ACKs */
unsigned n_acks; /* Number of ACKs between probes */
float integral_error;
lsquic_time_t last_sample;
};
@ -447,6 +449,7 @@ struct ietf_full_conn
unsigned ifc_n_slack_akbl[N_PNS];
unsigned ifc_n_slack_all; /* App PNS only */
unsigned ifc_max_retx_since_last_ack;
lsquic_time_t ifc_max_ack_delay;
uint64_t ifc_ecn_counts_in[N_PNS][4];
uint64_t ifc_ecn_counts_out[N_PNS][4];
lsquic_stream_id_t ifc_max_req_id;
@ -479,6 +482,11 @@ struct ietf_full_conn
unsigned ifc_last_retire_prior_to;
unsigned ifc_ack_freq_seqno;
unsigned ifc_last_pack_tol;
unsigned ifc_last_calc_pack_tol;
#if LSQUIC_CONN_STATS
unsigned ifc_min_pack_tol_sent;
unsigned ifc_max_pack_tol_sent;
#endif
unsigned ifc_max_ack_freq_seqno; /* Incoming */
unsigned short ifc_max_udp_payload; /* Cached TP */
lsquic_time_t ifc_last_live_update;
@ -511,7 +519,8 @@ struct ietf_full_conn
uint64_t ifc_last_max_data_off_sent;
unsigned short ifc_min_dg_sz,
ifc_max_dg_sz;
struct inc_ack_stats ifc_ias;
struct packet_tolerance_stats
ifc_pts;
#if LSQUIC_CONN_STATS
struct conn_stats ifc_stats,
*ifc_last_stats;
@ -584,6 +593,10 @@ mtu_probe_too_large (struct ietf_full_conn *, const struct lsquic_packet_out *);
static int
apply_trans_params (struct ietf_full_conn *, const struct transport_params *);
static void
packet_tolerance_alarm_expired (enum alarm_id al_id, void *ctx,
lsquic_time_t expiry, lsquic_time_t now);
static int
init_http (struct ietf_full_conn *);
@ -1256,9 +1269,15 @@ ietf_full_conn_init (struct ietf_full_conn *conn,
lsquic_alarmset_init_alarm(&conn->ifc_alset, AL_PATH_CHAL_3, path_chal_alarm_expired, conn);
lsquic_alarmset_init_alarm(&conn->ifc_alset, AL_BLOCKED_KA, blocked_ka_alarm_expired, conn);
lsquic_alarmset_init_alarm(&conn->ifc_alset, AL_MTU_PROBE, mtu_probe_alarm_expired, conn);
lsquic_rechist_init(&conn->ifc_rechist[PNS_INIT], 1);
lsquic_rechist_init(&conn->ifc_rechist[PNS_HSK], 1);
lsquic_rechist_init(&conn->ifc_rechist[PNS_APP], 1);
/* For Init and Handshake, we don't expect many ranges at all. For
* the regular receive history, set the limit to a value that would
* never be reached under normal circumstances, yet is small enough
* to use little memory and remain robust (fast) when under attack.
* The value 1000 limits receive history to about 16KB.
*/
lsquic_rechist_init(&conn->ifc_rechist[PNS_INIT], 1, 10);
lsquic_rechist_init(&conn->ifc_rechist[PNS_HSK], 1, 10);
lsquic_rechist_init(&conn->ifc_rechist[PNS_APP], 1, 1000);
lsquic_send_ctl_init(&conn->ifc_send_ctl, &conn->ifc_alset, enpub,
flags & IFC_SERVER ? &server_ver_neg : &conn->ifc_u.cli.ifcli_ver_neg,
&conn->ifc_pub, SC_IETF|SC_NSTP|(ecn ? SC_ECN : 0));
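
A quick back-of-the-envelope check of the "about 16KB" figure in the comment above; the 16-byte element size is an assumption (a packet number plus two unsigned fields), and the per-element bitmask adds a few more bytes:

#include <stdio.h>

/* Sketch only: the element size is assumed, not taken from the headers. */
int
main (void)
{
    const unsigned max_ranges = 1000;
    const unsigned elem_sz = 16;             /* assumed sizeof(struct rechist_elem) */
    const unsigned mask_sz = max_ranges / 8; /* one bit per element slot */
    printf("receive history cap: about %u bytes\n",
            max_ranges * elem_sz + mask_sz); /* 16125, i.e. roughly 16KB */
    return 0;
}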
@ -1287,6 +1306,7 @@ ietf_full_conn_init (struct ietf_full_conn *conn,
conn->ifc_max_req_id = VINT_MAX_VALUE + 1;
conn->ifc_ping_unretx_thresh = 20;
conn->ifc_max_retx_since_last_ack = MAX_RETR_PACKETS_SINCE_LAST_ACK;
conn->ifc_max_ack_delay = ACK_TIMEOUT;
if (conn->ifc_settings->es_noprogress_timeout)
conn->ifc_mflags |= MF_NOPROG_TIMEOUT;
if (conn->ifc_settings->es_ext_http_prio)
@ -3096,6 +3116,7 @@ ietf_full_conn_ci_destroy (struct lsquic_conn *lconn)
if (conn->ifc_flags & IFC_CREATED_OK)
{
LSQ_NOTICE("# ticks: %lu", conn->ifc_stats.n_ticks);
LSQ_NOTICE("sent %lu packets", conn->ifc_stats.out.packets);
LSQ_NOTICE("received %lu packets, of which %lu were not decryptable, %lu were "
"dups and %lu were errors; sent %lu packets, avg stream data per outgoing"
" packet is %lu bytes",
@ -3104,7 +3125,19 @@ ietf_full_conn_ci_destroy (struct lsquic_conn *lconn)
conn->ifc_stats.out.packets,
conn->ifc_stats.out.stream_data_sz /
(conn->ifc_stats.out.packets ? conn->ifc_stats.out.packets : 1));
LSQ_NOTICE("ACKs: in: %lu; processed: %lu; merged: %lu",
if (conn->ifc_flags & IFC_DELAYED_ACKS)
LSQ_NOTICE("delayed ACKs settings: (%u/%.3f/%.3f/%.3f/%.3f/%.3f); "
"packet tolerances sent: count: %u, min: %u, max: %u",
conn->ifc_settings->es_ptpc_periodicity,
conn->ifc_settings->es_ptpc_target,
conn->ifc_settings->es_ptpc_prop_gain,
conn->ifc_settings->es_ptpc_int_gain,
conn->ifc_settings->es_ptpc_err_thresh,
conn->ifc_settings->es_ptpc_err_divisor,
conn->ifc_ack_freq_seqno,
conn->ifc_min_pack_tol_sent, conn->ifc_max_pack_tol_sent);
LSQ_NOTICE("ACKs: delayed acks on: %s; in: %lu; processed: %lu; merged: %lu",
conn->ifc_flags & IFC_DELAYED_ACKS ? "yes" : "no",
conn->ifc_stats.in.n_acks, conn->ifc_stats.in.n_acks_proc,
conn->ifc_stats.in.n_acks_merged);
}
@ -3497,10 +3530,18 @@ apply_trans_params (struct ietf_full_conn *conn,
LSQ_DEBUG("PING period is set to %"PRIu64" usec", conn->ifc_ping_period);
if (conn->ifc_settings->es_delayed_acks
&& (params->tp_set & (1 << TPI_MIN_ACK_DELAY)))
&& (params->tp_set
& ((1 << TPI_MIN_ACK_DELAY)|(1 << TPI_MIN_ACK_DELAY_02))))
{
/* We do not use the min_ack_delay value for anything at the moment,
* as ACK_FREQUENCY frames we generate do not change the peer's max
* ACK delay. When or if we do decide to do it, don't forget to use
* the correct value here -- based on which TP is set!
*/
LSQ_DEBUG("delayed ACKs enabled");
conn->ifc_flags |= IFC_DELAYED_ACKS;
lsquic_alarmset_init_alarm(&conn->ifc_alset, AL_PACK_TOL,
packet_tolerance_alarm_expired, conn);
}
if (conn->ifc_settings->es_timestamps
&& (params->tp_set & (1 << TPI_TIMESTAMPS))
@ -4416,23 +4457,6 @@ generate_handshake_done_frame (struct ietf_full_conn *conn,
}
static unsigned
packet_tolerance (float avg_acked)
{
if (avg_acked < 3)
return 2;
if (avg_acked < 10)
return 4;
if (avg_acked < 20)
return 8;
if (avg_acked < 40)
return 16;
if (avg_acked < 80)
return 32;
return 64;
}
static void
generate_ack_frequency_frame (struct ietf_full_conn *conn, lsquic_time_t unused)
{
@ -4445,7 +4469,7 @@ generate_ack_frequency_frame (struct ietf_full_conn *conn, lsquic_time_t unused)
const int ignore = 1;
need = conn->ifc_conn.cn_pf->pf_ack_frequency_frame_size(
conn->ifc_ack_freq_seqno, conn->ifc_last_pack_tol,
conn->ifc_ack_freq_seqno, conn->ifc_last_calc_pack_tol,
conn->ifc_pub.max_peer_ack_usec);
packet_out = get_writeable_packet(conn, need);
if (!packet_out)
@ -4454,11 +4478,10 @@ generate_ack_frequency_frame (struct ietf_full_conn *conn, lsquic_time_t unused)
return;
}
conn->ifc_last_pack_tol = packet_tolerance(conn->ifc_ias.avg_acked);
sz = conn->ifc_conn.cn_pf->pf_gen_ack_frequency_frame(
packet_out->po_data + packet_out->po_data_sz,
lsquic_packet_out_avail(packet_out),
conn->ifc_ack_freq_seqno, conn->ifc_last_pack_tol,
conn->ifc_ack_freq_seqno, conn->ifc_last_calc_pack_tol,
conn->ifc_pub.max_peer_ack_usec, ignore);
if (sz < 0)
{
@ -4471,13 +4494,25 @@ generate_ack_frequency_frame (struct ietf_full_conn *conn, lsquic_time_t unused)
ABORT_ERROR("adding frame to packet failed: %d", errno);
return;
}
conn->ifc_last_pack_tol = conn->ifc_last_calc_pack_tol;
lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, sz);
packet_out->po_frame_types |= QUIC_FTBIT_ACK_FREQUENCY;
EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID,
"Generated ACK_FREQUENCY(seqno: %u; pack_tol: %u; "
"upd: %u; ignore: %d)", conn->ifc_ack_freq_seqno,
conn->ifc_last_pack_tol, conn->ifc_pub.max_peer_ack_usec, ignore);
LSQ_DEBUG("Generated ACK_FREQUENCY(seqno: %u; pack_tol: %u; "
"upd: %u; ignore: %d)", conn->ifc_ack_freq_seqno,
conn->ifc_last_pack_tol, conn->ifc_pub.max_peer_ack_usec, ignore);
++conn->ifc_ack_freq_seqno;
conn->ifc_send_flags &= ~SF_SEND_ACK_FREQUENCY;
#if LSQUIC_CONN_STATS
if (conn->ifc_last_pack_tol > conn->ifc_max_pack_tol_sent)
conn->ifc_max_pack_tol_sent = conn->ifc_last_pack_tol;
if (conn->ifc_last_pack_tol < conn->ifc_min_pack_tol_sent
|| 0 == conn->ifc_min_pack_tol_sent)
conn->ifc_min_pack_tol_sent = conn->ifc_last_pack_tol;
#endif
}
@ -4776,44 +4811,158 @@ handshake_confirmed (struct ietf_full_conn *conn)
}
static void
update_ema (float *val, unsigned new)
static float
calc_target (lsquic_time_t srtt_ms)
{
if (*val)
*val = (new - *val) * 0.4 + *val;
else
*val = new;
if (srtt_ms <= 5 * 1000)
return 2.5;
if (srtt_ms <= 10 * 1000)
return 2.0;
if (srtt_ms <= 15 * 1000)
return 1.6;
if (srtt_ms <= 20 * 1000)
return 1.4;
if (srtt_ms <= 30 * 1000)
return 1.3;
if (srtt_ms <= 40 * 1000)
return 1.2;
if (srtt_ms <= 50 * 1000)
return 1.1;
if (srtt_ms <= 60 * 1000)
return 1.0;
if (srtt_ms <= 70 * 1000)
return 0.9;
if (srtt_ms <= 80 * 1000)
return 0.8;
if (srtt_ms <= 100 * 1000)
return 0.7;
return 0.5;
}
static void
update_target_packet_tolerance (struct ietf_full_conn *conn,
const unsigned n_newly_acked)
packet_tolerance_alarm_expired (enum alarm_id al_id, void *ctx,
lsquic_time_t expiry, lsquic_time_t now)
{
unsigned tol;
struct ietf_full_conn *const conn = ctx;
const float Kp = conn->ifc_settings->es_ptpc_prop_gain,
Ki = conn->ifc_settings->es_ptpc_int_gain,
err_thresh = conn->ifc_settings->es_ptpc_err_thresh,
err_divisor = conn->ifc_settings->es_ptpc_err_divisor;
const unsigned periodicity = conn->ifc_settings->es_ptpc_periodicity;
const unsigned max_packtol = conn->ifc_settings->es_ptpc_max_packtol;
float avg_acks_per_rtt, error, combined_error, normalized,
combined_error_abs, target, rtts;
double dt;
lsquic_time_t srtt, begin_t;
update_ema(&conn->ifc_ias.avg_n_acks, conn->ifc_ias.n_acks);
update_ema(&conn->ifc_ias.avg_acked, n_newly_acked);
srtt = lsquic_rtt_stats_get_srtt(&conn->ifc_pub.rtt_stats);
tol = packet_tolerance(conn->ifc_ias.avg_acked);
LSQ_DEBUG("packtol logic: %u ACK frames (avg: %.2f), %u newly acked "
"(avg: %.1f), last sent %u, would-be tol: %u", conn->ifc_ias.n_acks,
conn->ifc_ias.avg_n_acks, n_newly_acked, conn->ifc_ias.avg_acked,
conn->ifc_last_pack_tol, tol);
if (conn->ifc_ias.avg_n_acks > 1.5 && conn->ifc_ias.avg_acked > 2.0
&& tol > conn->ifc_last_pack_tol)
if (srtt == 0)
goto end;
if (0 == conn->ifc_pts.n_acks)
/* Don't reset last_sample and calculate average for both this and next
* period the next time around.
*/
goto end;
if (conn->ifc_settings->es_ptpc_dyn_target)
target = calc_target(srtt);
else
target = conn->ifc_settings->es_ptpc_target;
dt = periodicity * (double) srtt / 1000000;
begin_t = conn->ifc_pts.last_sample ? conn->ifc_pts.last_sample
: conn->ifc_created;
/*
LSQ_DEBUG("begin: %"PRIu64"; now: %"PRIu64"; SRTT: %"PRIu64"; acks: %u",
begin_t, now, srtt, conn->ifc_pts.n_acks);
*/
rtts = (float) (now - begin_t) / (float) srtt;
avg_acks_per_rtt = (float) conn->ifc_pts.n_acks / (float) rtts;
normalized = avg_acks_per_rtt * M_E / target;
error = logf(normalized) - 1;
conn->ifc_pts.integral_error += error * (float) dt;
combined_error = Kp * error + Ki * conn->ifc_pts.integral_error;
combined_error_abs = fabsf(combined_error);
conn->ifc_pts.last_sample = now;
if (combined_error_abs > err_thresh)
{
LSQ_DEBUG("old packet tolerance target: %u, schedule ACK_FREQUENCY "
"increase", conn->ifc_last_pack_tol);
conn->ifc_send_flags |= SF_SEND_ACK_FREQUENCY;
unsigned adj = combined_error_abs / err_divisor;
unsigned last_pack_tol = conn->ifc_last_pack_tol;
if (0 == last_pack_tol)
{
last_pack_tol = (unsigned)
lsquic_senhist_largest(&conn->ifc_send_ctl.sc_senhist)
/ conn->ifc_pts.n_acks;
LSQ_DEBUG("packets sent: %"PRIu64"; ACKs received: %u; implied "
"tolerance: %u",
lsquic_senhist_largest(&conn->ifc_send_ctl.sc_senhist),
conn->ifc_pts.n_acks, last_pack_tol);
if (last_pack_tol < 2)
last_pack_tol = 2;
else if (last_pack_tol >= max_packtol)
last_pack_tol = max_packtol / 2;
}
if (combined_error > 0)
{
conn->ifc_last_calc_pack_tol = last_pack_tol + adj;
if (conn->ifc_last_calc_pack_tol >= max_packtol)
{
/* Clamp integral error when we can go no higher */
conn->ifc_pts.integral_error -= error * (float) dt;
conn->ifc_last_calc_pack_tol = max_packtol;
}
}
else
{
if (adj + 2 < last_pack_tol)
conn->ifc_last_calc_pack_tol = last_pack_tol - adj;
else
conn->ifc_last_calc_pack_tol = 2;
if (conn->ifc_last_calc_pack_tol == 2)
{
/* Clamp integral error when we can go no lower */
conn->ifc_pts.integral_error -= error * (float) dt;
}
}
if (conn->ifc_last_calc_pack_tol != conn->ifc_last_pack_tol)
{
LSQ_DEBUG("old packet tolerance target: %u, schedule ACK_FREQUENCY "
"%s to %u", conn->ifc_last_pack_tol,
combined_error > 0 ? "increase" : "decrease",
conn->ifc_last_calc_pack_tol);
conn->ifc_send_flags |= SF_SEND_ACK_FREQUENCY;
}
else
{
LSQ_DEBUG("packet tolerance unchanged at %u", conn->ifc_last_pack_tol);
conn->ifc_send_flags &= ~SF_SEND_ACK_FREQUENCY;
}
}
else if (conn->ifc_ias.avg_n_acks < 1.5
&& (float) tol < (float) conn->ifc_last_pack_tol * 3 / 4)
else
conn->ifc_send_flags &= ~SF_SEND_ACK_FREQUENCY;
LSQ_DEBUG("avg ACKs per RTT: %.3f; normalized: %.3f; target: %.3f; error: %.3f; "
"p-error: %.3f, i-error: %.3f; Overall: %.3f; "
"packet tolerance: current: %u, last: %u",
avg_acks_per_rtt, normalized, target, error, Kp * error,
conn->ifc_pts.integral_error, combined_error,
conn->ifc_last_calc_pack_tol, conn->ifc_last_pack_tol);
/* Until we have the first value, don't reset the counters */
if (conn->ifc_last_calc_pack_tol != 0)
conn->ifc_pts.n_acks = 0;
end:
if (lsquic_send_ctl_have_unacked_retx_data(&conn->ifc_send_ctl))
{
LSQ_DEBUG("old packet tolerance target: %u, schedule ACK_FREQUENCY "
"decrease", conn->ifc_last_pack_tol);
conn->ifc_send_flags |= SF_SEND_ACK_FREQUENCY;
LSQ_DEBUG("set PACK_TOL alarm %"PRIu64" microseconds into the future",
srtt * periodicity);
lsquic_alarmset_set(&conn->ifc_alset, al_id, now + srtt * periodicity);
}
else
LSQ_DEBUG("no unacked retx data: do not rearm the packet tolerance "
"alarm");
}
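
At its core, the alarm handler above is a proportional-integral (PI) controller driven by the logarithm of the observed ACK rate: it measures ACKs per SRTT over the sampling window, normalizes against the target, and feeds the proportional and integral error terms into the packet-tolerance adjustment. A stripped-down sketch of just that control step, with made-up gains rather than the es_ptpc_* defaults:

#include <math.h>

/* One control step: returns the combined error that drives the
 * packet-tolerance adjustment.  error == 0 exactly when
 * acks_per_rtt == target, since log(e) == 1.
 */
static float
pack_tol_control_step (float acks_per_rtt, float target, float dt,
                       float *integral_error)
{
    const float Kp = 0.5f, Ki = 0.1f;       /* illustrative gains only */
    float error = logf(acks_per_rtt * (float) M_E / target) - 1;
    *integral_error += error * dt;
    return Kp * error + Ki * *integral_error;
}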
@ -4823,13 +4972,11 @@ process_ack (struct ietf_full_conn *conn, struct ack_info *acki,
{
enum packnum_space pns;
lsquic_packno_t packno;
unsigned n_unacked;
int one_rtt_acked;
CONN_STATS(in.n_acks_proc, 1);
LSQ_DEBUG("Processing ACK");
one_rtt_acked = lsquic_send_ctl_1rtt_acked(&conn->ifc_send_ctl);
n_unacked = lsquic_send_ctl_n_unacked(&conn->ifc_send_ctl);
if (0 == lsquic_send_ctl_got_ack(&conn->ifc_send_ctl, acki, received, now))
{
pns = acki->pns;
@ -4844,9 +4991,6 @@ process_ack (struct ietf_full_conn *conn, struct ack_info *acki,
ignore_init(conn);
handshake_confirmed(conn);
}
if (PNS_APP == pns && (conn->ifc_flags & IFC_DELAYED_ACKS))
update_target_packet_tolerance(conn,
n_unacked - lsquic_send_ctl_n_unacked(&conn->ifc_send_ctl));
return 0;
}
else
@ -5676,20 +5820,20 @@ process_ack_frame (struct ietf_full_conn *conn,
conn->ifc_max_ack_packno[pns] = packet_in->pi_packno;
new_acki->pns = pns;
++conn->ifc_pts.n_acks;
/* Only cache ACKs for PNS_APP */
if (pns == PNS_APP && new_acki == &conn->ifc_ack)
{
LSQ_DEBUG("Saved ACK");
conn->ifc_flags |= IFC_HAVE_SAVED_ACK;
conn->ifc_saved_ack_received = packet_in->pi_received;
conn->ifc_ias.n_acks = 1;
}
else if (pns == PNS_APP)
{
if (0 == lsquic_merge_acks(&conn->ifc_ack, new_acki))
{
CONN_STATS(in.n_acks_merged, 1);
++conn->ifc_ias.n_acks;
LSQ_DEBUG("merged into saved ACK, getting %s",
(lsquic_acki2str(&conn->ifc_ack, conn->ifc_pub.mm->ack_str,
MAX_ACKI_STR_SZ), conn->ifc_pub.mm->ack_str));
@ -6414,6 +6558,15 @@ process_ack_frequency_frame (struct ietf_full_conn *conn,
return 0;
}
if (upd_mad < TP_MIN_ACK_DELAY)
{
ABORT_QUIETLY(0, TEC_PROTOCOL_VIOLATION,
"Update Max Ack Delay value of %"PRIu64" usec is invalid, as it "
"is smaller than the advertised min_ack_delay of %u usec",
upd_mad, TP_MIN_ACK_DELAY);
return 0;
}
if (conn->ifc_max_ack_freq_seqno > 0
&& seqno <= conn->ifc_max_ack_freq_seqno)
{
@ -6428,7 +6581,15 @@ process_ack_frequency_frame (struct ietf_full_conn *conn,
conn->ifc_max_retx_since_last_ack = pack_tol;
}
/* TODO: do something with max ack delay update */
if (upd_mad != conn->ifc_max_ack_delay)
{
conn->ifc_max_ack_delay = upd_mad;
LSQ_DEBUG("set Max Ack Delay to new value of %"PRIu64" usec",
conn->ifc_max_ack_delay);
}
else
LSQ_DEBUG("keep Max Ack Delay unchanged at %"PRIu64" usec",
conn->ifc_max_ack_delay);
if (ignore)
{
@ -6770,16 +6931,23 @@ try_queueing_ack_app (struct ietf_full_conn *conn,
}
else if (conn->ifc_n_slack_akbl[PNS_APP] > 0)
{
/* See https://github.com/quicwg/base-drafts/issues/3304 for more */
srtt = lsquic_rtt_stats_get_srtt(&conn->ifc_pub.rtt_stats);
if (srtt)
ack_timeout = MAX(1000, MIN(ACK_TIMEOUT, srtt / 4));
if (!lsquic_alarmset_is_set(&conn->ifc_alset, AL_ACK_APP))
{
/* See https://github.com/quicwg/base-drafts/issues/3304 for more */
srtt = lsquic_rtt_stats_get_srtt(&conn->ifc_pub.rtt_stats);
if (srtt)
ack_timeout = MAX(1000, MIN(conn->ifc_max_ack_delay, srtt / 4));
else
ack_timeout = conn->ifc_max_ack_delay;
lsquic_alarmset_set(&conn->ifc_alset, AL_ACK_APP,
now + ack_timeout);
LSQ_DEBUG("%s ACK alarm set to %"PRIu64, lsquic_pns2str[PNS_APP],
now + ack_timeout);
}
else
ack_timeout = ACK_TIMEOUT;
lsquic_alarmset_set(&conn->ifc_alset, AL_ACK_APP,
now + ack_timeout);
LSQ_DEBUG("%s ACK alarm set to %"PRIu64, lsquic_pns2str[PNS_APP],
now + ack_timeout);
LSQ_DEBUG("%s ACK alarm already set to %"PRIu64" usec from now",
lsquic_pns2str[PNS_APP],
conn->ifc_alset.as_expiry[AL_ACK_APP] - now);
}
}
@ -9524,3 +9692,4 @@ static const struct lsquic_stream_if unicla_if =
static const struct lsquic_stream_if *unicla_if_ptr = &unicla_if;
typedef char dcid_elem_fits_in_128_bytes[sizeof(struct dcid_elem) <= 128 ? 1 : - 1];

View file

@ -24,10 +24,17 @@
void
lsquic_rechist_init (struct lsquic_rechist *rechist, int ietf)
lsquic_rechist_init (struct lsquic_rechist *rechist, int ietf,
unsigned max_ranges)
{
memset(rechist, 0, sizeof(*rechist));
rechist->rh_cutoff = ietf ? 0 : 1;
/* '1' is an odd case that would add an extra conditional in
* rechist_reuse_last_elem(), so we prohibit it.
*/
if (max_ranges == 1)
max_ranges = 2;
rechist->rh_max_ranges = max_ranges;
}
@ -95,6 +102,8 @@ rechist_grow (struct lsquic_rechist *rechist)
nelems = rechist->rh_n_alloced * 2;
else
nelems = 4;
if (rechist->rh_max_ranges && nelems > rechist->rh_max_ranges)
nelems = rechist->rh_max_ranges;
n_masks = (nelems + (-nelems & (BITS_PER_MASK - 1))) / BITS_PER_MASK;
size = sizeof(struct rechist_elem) * nelems + sizeof(uintptr_t) * n_masks;
mem = realloc(rechist->rh_elems, size);
@ -117,15 +126,44 @@ rechist_grow (struct lsquic_rechist *rechist)
}
/* We hit maximum number of elements. To allocate a new element, we drop
* the last element and return its index, reusing the slot.
*/
static int
rechist_reuse_last_elem (struct lsquic_rechist *rechist)
{
struct rechist_elem *last, *penultimate;
unsigned last_idx;
/* No need to check bitmask anywhere: the array is full! */
last = rechist->rh_elems;
while (last->re_next != UINT_MAX)
++last;
last_idx = last - rechist->rh_elems;
penultimate = rechist->rh_elems;
while (penultimate->re_next != last_idx)
++penultimate;
penultimate->re_next = UINT_MAX;
return last_idx;
}
static int
rechist_alloc_elem (struct lsquic_rechist *rechist)
{
unsigned i, idx;
uintptr_t *mask;
if (rechist->rh_n_used == rechist->rh_n_alloced
&& 0 != rechist_grow(rechist))
return -1;
if (rechist->rh_n_used == rechist->rh_n_alloced)
{
if (rechist->rh_max_ranges
&& rechist->rh_n_used >= rechist->rh_max_ranges)
return rechist_reuse_last_elem(rechist);
if (0 != rechist_grow(rechist))
return -1;
}
for (mask = rechist->rh_masks; *mask == UINTPTR_MAX; ++mask)
;
@ -238,6 +276,8 @@ lsquic_rechist_received (lsquic_rechist_t *rechist, lsquic_packno_t packno,
el = &rechist->rh_elems[el->re_next];
}
if (rechist->rh_max_ranges && rechist->rh_n_used >= rechist->rh_max_ranges)
goto replace_last_el;
prev_idx = el - rechist->rh_elems;
idx = rechist_alloc_elem(rechist);
if (idx < 0)
@ -266,6 +306,9 @@ lsquic_rechist_received (lsquic_rechist_t *rechist, lsquic_packno_t packno,
return REC_ST_OK;
insert_before:
if (el->re_next == UINT_MAX && rechist->rh_max_ranges
&& rechist->rh_n_used >= rechist->rh_max_ranges)
goto replace_last_el;
prev_idx = prev - rechist->rh_elems;
next_idx = el - rechist->rh_elems;
idx = rechist_alloc_elem(rechist);
@ -282,6 +325,16 @@ lsquic_rechist_received (lsquic_rechist_t *rechist, lsquic_packno_t packno,
rechist_sanity_check(rechist);
return REC_ST_OK;
replace_last_el:
/* Special case: replace last element if chopping, because we cannot
* realloc the "prev_idx" hook
*/
assert(el->re_next == UINT_MAX);
el->re_low = packno;
el->re_count = 1;
rechist_sanity_check(rechist);
return REC_ST_OK;
}
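
For illustration, a hedged usage sketch of the new cap, using the lsquic_rechist_init()/lsquic_rechist_received() signatures from this commit (the include set is assumed): once the range count hits max_ranges, a packet that would start a new range reuses the slot of the lowest range instead of growing the array.

#include "lsquic_int_types.h"
#include "lsquic_packet_common.h"
#include "lsquic_rechist.h"

static void
rechist_cap_example (void)
{
    struct lsquic_rechist rechist;
    lsquic_time_t now = 0;

    lsquic_rechist_init(&rechist, 1 /* ietf */, 2 /* max_ranges */);
    /* Three disjoint ranges, but only two slots: recording packet 9
     * drops the lowest range [1, 1] and reuses its element.
     */
    lsquic_rechist_received(&rechist, 1, now);
    lsquic_rechist_received(&rechist, 5, now);
    lsquic_rechist_received(&rechist, 9, now);
    lsquic_rechist_cleanup(&rechist);
}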

View file

@ -30,6 +30,7 @@ struct lsquic_rechist {
unsigned rh_n_alloced;
unsigned rh_n_used;
unsigned rh_head;
unsigned rh_max_ranges;
enum {
RH_CUTOFF_SET = (1 << 0),
} rh_flags;
@ -46,7 +47,7 @@ struct lsquic_rechist {
typedef struct lsquic_rechist lsquic_rechist_t;
void
lsquic_rechist_init (struct lsquic_rechist *, int is_ietf);
lsquic_rechist_init (struct lsquic_rechist *, int is_ietf, unsigned max_ranges);
void
lsquic_rechist_cleanup (struct lsquic_rechist *);

View file

@ -191,6 +191,14 @@ send_ctl_first_unacked_retx_packet (const struct lsquic_send_ctl *ctl,
}
int
lsquic_send_ctl_have_unacked_retx_data (const struct lsquic_send_ctl *ctl)
{
return lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX_APP)
&& send_ctl_first_unacked_retx_packet(ctl, PNS_APP);
}
static lsquic_packet_out_t *
send_ctl_last_unacked_retx_packet (const struct lsquic_send_ctl *ctl,
enum packnum_space pns)
@ -507,6 +515,12 @@ set_retx_alarm (struct lsquic_send_ctl *ctl, enum packnum_space pns,
LSQ_DEBUG("set retx alarm to %"PRIu64", which is %"PRIu64
" usec from now, mode %s", now + delay, delay, retx2str[rm]);
lsquic_alarmset_set(ctl->sc_alset, AL_RETX_INIT + pns, now + delay);
if (PNS_APP == pns
&& ctl->sc_ci == &lsquic_cong_bbr_if
&& lsquic_alarmset_is_inited(ctl->sc_alset, AL_PACK_TOL)
&& !lsquic_alarmset_is_set(ctl->sc_alset, AL_PACK_TOL))
lsquic_alarmset_set(ctl->sc_alset, AL_PACK_TOL, now + delay);
}

View file

@ -158,6 +158,9 @@ lsquic_send_ctl_smallest_unacked (lsquic_send_ctl_t *ctl);
int
lsquic_send_ctl_have_unacked_stream_frames (const lsquic_send_ctl_t *);
int
lsquic_send_ctl_have_unacked_retx_data (const struct lsquic_send_ctl *);
void
lsquic_send_ctl_cleanup (lsquic_send_ctl_t *);
@ -399,8 +402,6 @@ lsquic_send_ctl_cidlen_change (struct lsquic_send_ctl *,
void
lsquic_send_ctl_begin_optack_detection (struct lsquic_send_ctl *);
#define lsquic_send_ctl_n_unacked(ctl_) ((ctl_)->sc_n_in_flight_retx)
void
lsquic_send_ctl_path_validated (struct lsquic_send_ctl *);

View file

@ -61,6 +61,7 @@ tpi_val_2_enum (uint64_t tpi_val)
case 0x1057: return TPI_LOSS_BITS;
case 0x2AB2: return TPI_GREASE_QUIC_BIT;
case 0xDE1A: return TPI_MIN_ACK_DELAY;
case 0xFF02DE1A:return TPI_MIN_ACK_DELAY_02;
case 0x7158: return TPI_TIMESTAMPS;
default: return INT_MAX;
}
@ -92,6 +93,7 @@ static const unsigned enum_2_tpi_val[LAST_TPI + 1] =
#endif
[TPI_LOSS_BITS] = 0x1057,
[TPI_MIN_ACK_DELAY] = 0xDE1A,
[TPI_MIN_ACK_DELAY_02] = 0xFF02DE1A,
[TPI_TIMESTAMPS] = 0x7158,
[TPI_GREASE_QUIC_BIT] = 0x2AB2,
};
@ -122,6 +124,7 @@ const char * const lsquic_tpi2str[LAST_TPI + 1] =
#endif
[TPI_LOSS_BITS] = "loss_bits",
[TPI_MIN_ACK_DELAY] = "min_ack_delay",
[TPI_MIN_ACK_DELAY_02] = "min_ack_delay_02",
[TPI_TIMESTAMPS] = "timestamps",
[TPI_GREASE_QUIC_BIT] = "grease_quic_bit",
};
@ -162,6 +165,7 @@ static const uint64_t max_vals[MAX_NUMERIC_TPI + 1] =
[TPI_ACTIVE_CONNECTION_ID_LIMIT] = VINT_MAX_VALUE,
[TPI_LOSS_BITS] = 1,
[TPI_MIN_ACK_DELAY] = (1u << 24) - 1u,
[TPI_MIN_ACK_DELAY_02] = (1u << 24) - 1u,
[TPI_TIMESTAMPS] = TS_WANT_THEM|TS_GENERATE_THEM,
[TPI_MAX_DATAGRAM_FRAME_SIZE] = VINT_MAX_VALUE,
};
@ -172,6 +176,7 @@ static const uint64_t min_vals[MAX_NUMERIC_TPI + 1] =
/* On the other hand, we do enforce the lower bound. */
[TPI_MAX_UDP_PAYLOAD_SIZE] = 1200,
[TPI_MIN_ACK_DELAY] = 1,
[TPI_MIN_ACK_DELAY_02] = 1,
[TPI_ACTIVE_CONNECTION_ID_LIMIT] = 2,
[TPI_TIMESTAMPS] = TS_WANT_THEM,
};
@ -398,6 +403,7 @@ lsquic_tp_encode (const struct transport_params *params, int is_server,
case TPI_ACTIVE_CONNECTION_ID_LIMIT:
case TPI_LOSS_BITS:
case TPI_MIN_ACK_DELAY:
case TPI_MIN_ACK_DELAY_02:
case TPI_TIMESTAMPS:
case TPI_MAX_DATAGRAM_FRAME_SIZE:
vint_write(p, 1 << bits[tpi][2], bits[tpi][1],
@ -496,6 +502,7 @@ lsquic_tp_decode (const unsigned char *const buf, size_t bufsz,
s = vint_read(p, end, &param_id);
if (s < 0)
return -1;
LSQ_DEBUG("read TP 0x%"PRIX64, param_id);
p += s;
s = vint_read(p, end, &len);
if (s < 0)
@ -525,6 +532,7 @@ lsquic_tp_decode (const unsigned char *const buf, size_t bufsz,
case TPI_ACTIVE_CONNECTION_ID_LIMIT:
case TPI_LOSS_BITS:
case TPI_MIN_ACK_DELAY:
case TPI_MIN_ACK_DELAY_02:
case TPI_TIMESTAMPS:
case TPI_MAX_DATAGRAM_FRAME_SIZE:
switch (len)
@ -661,6 +669,17 @@ lsquic_tp_decode (const unsigned char *const buf, size_t bufsz,
return -1;
}
if ((params->tp_set & (1 << TPI_MIN_ACK_DELAY_02))
&& params->tp_numerics[TPI_MIN_ACK_DELAY_02]
> params->tp_numerics[TPI_MAX_ACK_DELAY] * 1000)
{
LSQ_DEBUG("min_ack_delay_02 (%"PRIu64" usec) is larger than "
"max_ack_delay (%"PRIu64" ms)",
params->tp_numerics[TPI_MIN_ACK_DELAY_02],
params->tp_numerics[TPI_MAX_ACK_DELAY]);
return -1;
}
return (int) (end - buf);
#undef EXPECT_LEN
}
@ -912,6 +931,7 @@ lsquic_tp_encode_27 (const struct transport_params *params, int is_server,
case TPI_ACTIVE_CONNECTION_ID_LIMIT:
case TPI_LOSS_BITS:
case TPI_MIN_ACK_DELAY:
case TPI_MIN_ACK_DELAY_02:
case TPI_TIMESTAMPS:
case TPI_MAX_DATAGRAM_FRAME_SIZE:
vint_write(p, 1 << bits[tpi][2], bits[tpi][1],
@ -1041,6 +1061,7 @@ lsquic_tp_decode_27 (const unsigned char *const buf, size_t bufsz,
case TPI_ACTIVE_CONNECTION_ID_LIMIT:
case TPI_LOSS_BITS:
case TPI_MIN_ACK_DELAY:
case TPI_MIN_ACK_DELAY_02:
case TPI_TIMESTAMPS:
case TPI_MAX_DATAGRAM_FRAME_SIZE:
switch (len)
@ -1171,6 +1192,17 @@ lsquic_tp_decode_27 (const unsigned char *const buf, size_t bufsz,
return -1;
}
if ((params->tp_set & (1 << TPI_MIN_ACK_DELAY_02))
&& params->tp_numerics[TPI_MIN_ACK_DELAY_02]
> params->tp_numerics[TPI_MAX_ACK_DELAY] * 1000)
{
LSQ_DEBUG("min_ack_delay_02 (%"PRIu64" usec) is larger than "
"max_ack_delay (%"PRIu64" ms)",
params->tp_numerics[TPI_MIN_ACK_DELAY_02],
params->tp_numerics[TPI_MAX_ACK_DELAY]);
return -1;
}
return (int) (end - buf);
#undef EXPECT_LEN
}

View file

@ -33,6 +33,14 @@ enum transport_param_id
* Numeric transport parameters without default values:
*/
TPI_MIN_ACK_DELAY,
/* The _02 version of MIN_ACK_DELAY is to support -02 version of the
* draft. Functionally, it's exactly the same as -01, but without the
* extra enum there is no easy way to keep encoding and decoding simple.
* Because we support both, we don't care which one the peer selects.
* If the peer, like us, also sends both, we *assume* that the two
* versions of the transport parameter carry the same values.
*/
TPI_MIN_ACK_DELAY_02,
TPI_TIMESTAMPS,
TPI_MAX_DATAGRAM_FRAME_SIZE,
TPI_LOSS_BITS, MAX_NUMERIC_TPI = TPI_LOSS_BITS,
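
As the comment above notes, the two enum values are interchangeable on receipt; code that only cares whether the peer advertised the extension can test both bits at once, which is what the connection layer does elsewhere in this commit. A small sketch against the transport_params structure from this header:

/* Does the peer advertise min_ack_delay in either draft version? */
static int
peer_supports_delayed_acks (const struct transport_params *params)
{
    return !!(params->tp_set
            & ((1 << TPI_MIN_ACK_DELAY) | (1 << TPI_MIN_ACK_DELAY_02)));
}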
@ -117,7 +125,7 @@ struct transport_params
#define TP_DEF_INIT_MAX_STREAM_DATA_BIDI_REMOTE 0
#define TP_DEF_INIT_MAX_STREAM_DATA_UNI 0
#define TP_DEF_MAX_IDLE_TIMEOUT 0
#define TP_DEF_MAX_ACK_DELAY 25
#define TP_DEF_MAX_ACK_DELAY 25u
#define TP_DEF_ACTIVE_CONNECTION_ID_LIMIT 2
/* [draft-ietf-quic-transport-18], Section 18.1 */
@ -202,8 +210,18 @@ lsquic_tp_get_quantum_sz (void);
| (1 << TPI_MAX_DATAGRAM_FRAME_SIZE) \
/* [draft-iyengar-quic-delayed-ack-01] does not specify, store: */ \
| (1 << TPI_MIN_ACK_DELAY) \
/* [draft-iyengar-quic-delayed-ack-02] does not specify, store: */ \
| (1 << TPI_MIN_ACK_DELAY_02) \
/* [draft-huitema-quic-ts-03] does not specify, store: */ \
| (1 << TPI_TIMESTAMPS) \
)
/* We always send the minimum ACK delay as 10ms; it is not configurable.
* There is nothing special about this particular value, except that it
* is not "too small" -- that is, it's within an order of magnitude from
* the maximum ACK delay of 25ms. 10ms seems like a safe lower bound for
* the ACK delay without performing more research.
*/
#define TP_MIN_ACK_DELAY 10000u
#endif
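
A worked check of the units involved (a sketch, not library code): min_ack_delay is carried in microseconds while max_ack_delay is expressed in milliseconds, which is why the decoder multiplies by 1000 before comparing; with the defaults above, 10,000 usec is comfortably below 25 ms.

#include <stdio.h>

/* Sketch of the decoder's sanity check using the default values above. */
int
main (void)
{
    const unsigned long long min_ack_delay_usec = 10000;  /* TP_MIN_ACK_DELAY */
    const unsigned long long max_ack_delay_ms = 25;       /* TP_DEF_MAX_ACK_DELAY */
    if (min_ack_delay_usec > max_ack_delay_ms * 1000)
        puts("invalid: min_ack_delay exceeds max_ack_delay");
    else
        puts("ok: 10,000 usec <= 25,000 usec");
    return 0;
}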