Latest changes

- Do not create a gap in sent packnos when squeezing delayed
  packets.
- sendctl now checks all unacked bytes, not just retx bytes.
- Connections with blocked scheduled packets are no longer tickable
  for sending.
Dmitri Tikhonov 2018-04-25 10:58:55 -04:00
parent bdba46fd00
commit aa0d8cfff0
4 changed files with 81 additions and 19 deletions

@@ -1,3 +1,11 @@
+2018-04-25
+
+    - [BUGFIX] Do not create gap in sent packnos when squeezing delayed
+      packets.
+    - [BUGFIX] sendctl checks for all unacked bytes, not just retx bytes.
+    - [BUGFIX] connections with blocked scheduled packets are not tickable
+      for sending.
+
 2018-04-23
 
     - Fix busy loop: tickable must make progress.  When connection is

@@ -2613,6 +2613,14 @@ write_is_possible (struct full_conn *conn)
 }
 
 
+static int
+should_generate_ack (const struct full_conn *conn)
+{
+    return (conn->fc_flags & FC_ACK_QUEUED)
+        || lsquic_send_ctl_lost_ack(&conn->fc_send_ctl);
+}
+
+
 static enum tick_st
 full_conn_ci_tick (lsquic_conn_t *lconn, lsquic_time_t now)
 {
@@ -2708,8 +2716,7 @@ full_conn_ci_tick (lsquic_conn_t *lconn, lsquic_time_t now)
     have_delayed_packets = lsquic_send_ctl_maybe_squeeze_sched(
                                                     &conn->fc_send_ctl);
 
-    if ((conn->fc_flags & FC_ACK_QUEUED) ||
-            lsquic_send_ctl_lost_ack(&conn->fc_send_ctl))
+    if (should_generate_ack(conn))
     {
         if (have_delayed_packets)
             lsquic_send_ctl_reset_packnos(&conn->fc_send_ctl);
@@ -3317,7 +3324,9 @@ full_conn_ci_is_tickable (lsquic_conn_t *lconn)
     if (!TAILQ_EMPTY(&conn->fc_pub.service_streams))
         return 1;
 
-    if (lsquic_send_ctl_can_send(&conn->fc_send_ctl))
+    if (lsquic_send_ctl_can_send(&conn->fc_send_ctl)
+        && (should_generate_ack(conn) ||
+            !lsquic_send_ctl_sched_is_blocked(&conn->fc_send_ctl)))
     {
         if (lsquic_send_ctl_has_buffered(&conn->fc_send_ctl))
             return 1;
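The new condition above is the heart of the third bugfix: a connection that can send but whose scheduled queue is blocked, and which has no ACK to generate, is skipped, because ticking it cannot make progress. A minimal self-contained sketch of the predicate, using invented stand-in fields rather than lsquic's real structures:

#include <stdio.h>

/* Stand-in connection state; field names are invented for this sketch. */
struct toy_conn {
    int ack_queued;       /* an ACK is waiting to be generated */
    int can_send;         /* congestion control allows sending */
    int sched_blocked;    /* scheduled queue held back after RTOs */
};

/* Tickable for sending only if sending can make progress: either an ACK
 * will go out, or the scheduled queue is not blocked. */
static int
toy_tickable_for_sending (const struct toy_conn *conn)
{
    return conn->can_send
        && (conn->ack_queued || !conn->sched_blocked);
}

int main(void)
{
    struct toy_conn conn = { .ack_queued = 0, .can_send = 1,
                             .sched_blocked = 1, };
    printf("tickable: %d\n", toy_tickable_for_sending(&conn));  /* prints 0 */
    return 0;
}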

@@ -885,6 +885,15 @@ send_ctl_retx_bytes_out (const struct lsquic_send_ctl *ctl)
 }
 
 
+static unsigned
+send_ctl_all_bytes_out (const struct lsquic_send_ctl *ctl)
+{
+    return ctl->sc_bytes_scheduled
+         + ctl->sc_bytes_unacked_all
+         + ctl->sc_bytes_out;
+}
+
+
 int
 lsquic_send_ctl_pacer_blocked (struct lsquic_send_ctl *ctl)
 {
@@ -902,9 +911,9 @@ __attribute__((weak))
 int
 lsquic_send_ctl_can_send (lsquic_send_ctl_t *ctl)
 {
-    const unsigned n_out = send_ctl_retx_bytes_out(ctl);
-    LSQ_DEBUG("%s: n_out: %u (unacked_retx: %u, out: %u); cwnd: %lu", __func__,
-                n_out, ctl->sc_bytes_unacked_retx, ctl->sc_bytes_out,
+    const unsigned n_out = send_ctl_all_bytes_out(ctl);
+    LSQ_DEBUG("%s: n_out: %u (unacked_all: %u, out: %u); cwnd: %lu", __func__,
+                n_out, ctl->sc_bytes_unacked_all, ctl->sc_bytes_out,
                 lsquic_cubic_get_cwnd(&ctl->sc_cubic));
     if (ctl->sc_flags & SC_PACE)
    {
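The can_send change is plain arithmetic: the sum compared against the congestion window now covers scheduled, unacked, and in-flight bytes rather than retransmittable bytes only. A toy model of that accounting, with invented field names standing in for the real counters:

#include <stdio.h>

/* Invented stand-ins for the three byte counters and the window. */
struct toy_send_ctl {
    unsigned      bytes_scheduled;    /* queued but not yet sent */
    unsigned      bytes_unacked_all;  /* sent and awaiting ACK */
    unsigned      bytes_out;          /* handed off, in flight */
    unsigned long cwnd;               /* congestion window, in bytes */
};

static int
toy_can_send (const struct toy_send_ctl *ctl)
{
    const unsigned n_out = ctl->bytes_scheduled
                         + ctl->bytes_unacked_all
                         + ctl->bytes_out;
    return n_out < ctl->cwnd;
}

int main(void)
{
    /* 10000 + 4000 + 1000 = 15000 >= 14600: sending is not allowed. */
    struct toy_send_ctl ctl = { 10000, 4000, 1000, 14600 };
    printf("can send: %d\n", toy_can_send(&ctl));  /* prints 0 */
    return 0;
}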
@@ -1041,6 +1050,21 @@ lsquic_send_ctl_scheduled_one (lsquic_send_ctl_t *ctl,
 }
 
 
+/* This mimics the logic in lsquic_send_ctl_next_packet_to_send(): we want
+ * to check whether the first scheduled packet cannot be sent.
+ */
+int
+lsquic_send_ctl_sched_is_blocked (const struct lsquic_send_ctl *ctl)
+{
+    const lsquic_packet_out_t *packet_out
+                            = TAILQ_FIRST(&ctl->sc_scheduled_packets);
+    return ctl->sc_n_consec_rtos
+        && 0 == ctl->sc_next_limit
+        && packet_out
+        && !(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK));
+}
+
+
 lsquic_packet_out_t *
 lsquic_send_ctl_next_packet_to_send (lsquic_send_ctl_t *ctl)
 {
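The predicate reads as: after consecutive RTOs the sender's packet allowance (sc_next_limit) may be exhausted, and then the head of the scheduled queue is stuck unless it carries an ACK frame, which the check exempts, presumably so acknowledgments can still flow. A toy restatement with invented names:

#include <stdio.h>

/* Invented stand-ins for the fields the real check reads. */
struct toy_sched_state {
    unsigned n_consec_rtos;   /* consecutive retransmission timeouts */
    unsigned next_limit;      /* packets still allowed after the RTOs */
    int      have_head;       /* scheduled queue is non-empty */
    int      head_has_ack;    /* first scheduled packet carries an ACK */
};

static int
toy_sched_is_blocked (const struct toy_sched_state *s)
{
    /* Blocked: in RTO mode, allowance exhausted, and the packet at the
     * head of the queue does not carry an ACK frame. */
    return s->n_consec_rtos
        && 0 == s->next_limit
        && s->have_head
        && !s->head_has_ack;
}

int main(void)
{
    struct toy_sched_state s = { 2, 0, 1, 0 };
    printf("blocked: %d\n", toy_sched_is_blocked(&s));  /* prints 1 */
    return 0;
}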
@@ -1298,6 +1322,28 @@ lsquic_send_ctl_set_tcid0 (lsquic_send_ctl_t *ctl, int tcid0)
 }
 
 
+/* Need to assign new packet numbers to all packets following the first
+ * dropped packet to eliminate packet number gap.
+ */
+static void
+send_ctl_repackno_sched_tail (struct lsquic_send_ctl *ctl,
+                              struct lsquic_packet_out *pre_dropped)
+{
+    struct lsquic_packet_out *packet_out;
+
+    assert(pre_dropped);
+
+    ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist);
+    for (packet_out = TAILQ_NEXT(pre_dropped, po_next); packet_out;
+                        packet_out = TAILQ_NEXT(packet_out, po_next))
+    {
+        packet_out->po_flags |= PO_REPACKNO;
+        if (packet_out->po_flags & PO_ENCRYPTED)
+            send_ctl_release_enc_data(ctl, packet_out);
+    }
+}
+
+
 /* The controller elides this STREAM frames of stream `stream_id' from
  * scheduled and buffered packets.  If a packet becomes empty as a result,
  * it is dropped.
@@ -1341,20 +1387,8 @@ lsquic_send_ctl_elide_stream_frames (lsquic_send_ctl_t *ctl, uint32_t stream_id)
         }
     }
 
-    /* Need to assign new packet numbers to all packets following the first
-     * dropped packet to eliminate packet number gap.
-     */
     if (pre_dropped)
-    {
-        ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist);
-        for (packet_out = TAILQ_NEXT(pre_dropped, po_next); packet_out;
-                            packet_out = TAILQ_NEXT(packet_out, po_next))
-        {
-            packet_out->po_flags |= PO_REPACKNO;
-            if (packet_out->po_flags & PO_ENCRYPTED)
-                send_ctl_release_enc_data(ctl, packet_out);
-        }
-    }
+        send_ctl_repackno_sched_tail(ctl, pre_dropped);
 
     for (n = 0; n < sizeof(ctl->sc_buffered_packets) /
                                 sizeof(ctl->sc_buffered_packets[0]); ++n)
@@ -1456,10 +1490,12 @@ int
 lsquic_send_ctl_squeeze_sched (lsquic_send_ctl_t *ctl)
 {
     struct lsquic_packet_out *packet_out, *next;
+    struct lsquic_packet_out *pre_dropped;
 #ifndef NDEBUG
     int pre_squeeze_logged = 0;
 #endif
 
+    pre_dropped = NULL;
     for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out;
                                                     packet_out = next)
     {
@@ -1477,6 +1513,9 @@ lsquic_send_ctl_squeeze_sched (lsquic_send_ctl_t *ctl)
             LOG_PACKET_Q(&ctl->sc_scheduled_packets,
                 "unacked packets before squeezing");
 #endif
+            if (!pre_dropped)
+                pre_dropped = TAILQ_PREV(packet_out, lsquic_packets_tailq,
+                                                                    po_next);
             send_ctl_sched_remove(ctl, packet_out);
             LSQ_DEBUG("Dropping packet %"PRIu64" from scheduled queue",
                 packet_out->po_packno);
@@ -1484,6 +1523,9 @@ lsquic_send_ctl_squeeze_sched (lsquic_send_ctl_t *ctl)
         }
     }
 
+    if (pre_dropped)
+        send_ctl_repackno_sched_tail(ctl, pre_dropped);
+
 #ifndef NDEBUG
     if (pre_squeeze_logged)
         LOG_PACKET_Q(&ctl->sc_scheduled_packets,
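Taken together, the squeeze changes work in two passes: while dropping non-writeable packets, remember the predecessor of the first one dropped; afterwards, renumber the surviving tail so the sent packet numbers have no gap. A compact, self-contained model of that flow, using <sys/queue.h> and toy types that are not lsquic's:

#include <stdio.h>
#include <sys/queue.h>

/* Toy packet and queue; names are invented for this sketch. */
struct toy_packet {
    unsigned                 packno;
    int                      writeable;   /* squeezing keeps only these */
    TAILQ_ENTRY(toy_packet)  next;
};
TAILQ_HEAD(toy_queue, toy_packet);

int main(void)
{
    struct toy_queue q = TAILQ_HEAD_INITIALIZER(q);
    struct toy_packet p[4] = {
        { .packno = 10, .writeable = 1 },
        { .packno = 11, .writeable = 0 },
        { .packno = 12, .writeable = 0 },
        { .packno = 13, .writeable = 1 },
    };
    struct toy_packet *pkt, *nxt, *pre_dropped = NULL;
    unsigned i, next_packno;

    for (i = 0; i < 4; ++i)
        TAILQ_INSERT_TAIL(&q, &p[i], next);

    /* Pass 1 -- squeeze: drop non-writeable packets, remembering the
     * predecessor of the first one dropped. */
    for (pkt = TAILQ_FIRST(&q); pkt; pkt = nxt)
    {
        nxt = TAILQ_NEXT(pkt, next);
        if (!pkt->writeable)
        {
            if (!pre_dropped)
                pre_dropped = TAILQ_PREV(pkt, toy_queue, next);
            TAILQ_REMOVE(&q, pkt, next);
        }
    }

    /* Pass 2 -- repackno the tail: every packet after the last one that
     * kept its number gets the next consecutive packno, so the sequence
     * put on the wire has no gap (here: 10, 11 instead of 10, 13).  A
     * fuller version would also handle pre_dropped == NULL, i.e. the
     * queue head itself being dropped. */
    if (pre_dropped)
    {
        next_packno = pre_dropped->packno + 1;
        for (pkt = TAILQ_NEXT(pre_dropped, next); pkt;
                                        pkt = TAILQ_NEXT(pkt, next))
            pkt->packno = next_packno++;
    }

    TAILQ_FOREACH(pkt, &q, next)
        printf("packet %u\n", pkt->packno);   /* prints 10, then 11 */
    return 0;
}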

@@ -276,4 +276,7 @@ lsquic_send_ctl_pacer_blocked (struct lsquic_send_ctl *);
     lsquic_send_ctl_sanity_check(ctl);                          \
 } while (0)
 
+int
+lsquic_send_ctl_sched_is_blocked (const struct lsquic_send_ctl *);
+
 #endif