uh oh im bundling the deps

cere 2024-02-21 01:17:59 -05:00
parent ae28da8d60
commit ecca301ceb
584 changed files with 119933 additions and 24 deletions

@@ -0,0 +1,21 @@
""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine
This module provides a framework for the use of DNS Service Discovery
using IP multicast.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

@@ -0,0 +1,128 @@
import cython
cdef cython.uint DNS_COMPRESSION_HEADER_LEN
cdef cython.uint MAX_DNS_LABELS
cdef cython.uint DNS_COMPRESSION_POINTER_LEN
cdef cython.uint MAX_NAME_LENGTH
cdef cython.uint _TYPE_A
cdef cython.uint _TYPE_CNAME
cdef cython.uint _TYPE_PTR
cdef cython.uint _TYPE_TXT
cdef cython.uint _TYPE_SRV
cdef cython.uint _TYPE_HINFO
cdef cython.uint _TYPE_AAAA
cdef cython.uint _TYPE_NSEC
cdef cython.uint _FLAGS_QR_MASK
cdef cython.uint _FLAGS_TC
cdef cython.uint _FLAGS_QR_QUERY
cdef cython.uint _FLAGS_QR_RESPONSE
cdef object DECODE_EXCEPTIONS
cdef object IncomingDecodeError
from .._dns cimport (
DNSAddress,
DNSEntry,
DNSHinfo,
DNSNsec,
DNSPointer,
DNSQuestion,
DNSRecord,
DNSService,
DNSText,
)
from .._utils.time cimport current_time_millis
cdef class DNSIncoming:
cdef bint _did_read_others
cdef public unsigned int flags
cdef cython.uint offset
cdef public bytes data
cdef const unsigned char [:] view
cdef unsigned int _data_len
cdef cython.dict _name_cache
cdef cython.list _questions
cdef cython.list _answers
cdef public cython.uint id
cdef cython.uint _num_questions
cdef cython.uint _num_answers
cdef cython.uint _num_authorities
cdef cython.uint _num_additionals
cdef public bint valid
cdef public double now
cdef public object scope_id
cdef public object source
cdef bint _has_qu_question
@cython.locals(
question=DNSQuestion
)
cpdef bint has_qu_question(self)
cpdef bint is_query(self)
cpdef bint is_probe(self)
cpdef list answers(self)
cpdef bint is_response(self)
@cython.locals(
off="unsigned int",
label_idx="unsigned int",
length="unsigned int",
link="unsigned int",
link_data="unsigned int",
link_py_int=object,
linked_labels=cython.list
)
cdef unsigned int _decode_labels_at_offset(self, unsigned int off, cython.list labels, cython.set seen_pointers)
@cython.locals(offset="unsigned int")
cdef void _read_header(self)
cdef void _initial_parse(self)
@cython.locals(
end="unsigned int",
length="unsigned int",
offset="unsigned int"
)
cdef void _read_others(self)
@cython.locals(offset="unsigned int")
cdef _read_questions(self)
@cython.locals(
length="unsigned int",
)
cdef str _read_character_string(self)
cdef bytes _read_string(self, unsigned int length)
@cython.locals(
name_start="unsigned int",
offset="unsigned int"
)
cdef _read_record(self, object domain, unsigned int type_, unsigned int class_, unsigned int ttl, unsigned int length)
@cython.locals(
offset="unsigned int",
offset_plus_one="unsigned int",
offset_plus_two="unsigned int",
window="unsigned int",
bit="unsigned int",
byte="unsigned int",
i="unsigned int",
bitmap_length="unsigned int",
)
cdef list _read_bitmap(self, unsigned int end)
cdef str _read_name(self)

@@ -0,0 +1,442 @@
""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine
This module provides a framework for the use of DNS Service Discovery
using IP multicast.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""
import struct
import sys
from typing import Any, Dict, List, Optional, Set, Tuple, Union
from .._dns import (
DNSAddress,
DNSHinfo,
DNSNsec,
DNSPointer,
DNSQuestion,
DNSRecord,
DNSService,
DNSText,
)
from .._exceptions import IncomingDecodeError
from .._logger import log
from .._utils.time import current_time_millis
from ..const import (
_FLAGS_QR_MASK,
_FLAGS_QR_QUERY,
_FLAGS_QR_RESPONSE,
_FLAGS_TC,
_TYPE_A,
_TYPE_AAAA,
_TYPE_CNAME,
_TYPE_HINFO,
_TYPE_NSEC,
_TYPE_PTR,
_TYPE_SRV,
_TYPE_TXT,
_TYPES,
)
DNS_COMPRESSION_HEADER_LEN = 1
DNS_COMPRESSION_POINTER_LEN = 2
MAX_DNS_LABELS = 128
MAX_NAME_LENGTH = 253
DECODE_EXCEPTIONS = (IndexError, struct.error, IncomingDecodeError)
_seen_logs: Dict[str, Union[int, tuple]] = {}
_str = str
_int = int
class DNSIncoming:
"""Object representation of an incoming DNS packet"""
__slots__ = (
"_did_read_others",
'flags',
'offset',
'data',
'view',
'_data_len',
'_name_cache',
'_questions',
'_answers',
'id',
'_num_questions',
'_num_answers',
'_num_authorities',
'_num_additionals',
'valid',
'now',
'scope_id',
'source',
'_has_qu_question',
)
def __init__(
self,
data: bytes,
source: Optional[Tuple[str, int]] = None,
scope_id: Optional[int] = None,
now: Optional[float] = None,
) -> None:
"""Constructor from string holding bytes of packet"""
self.flags = 0
self.offset = 0
self.data = data
self.view = data
self._data_len = len(data)
self._name_cache: Dict[int, List[str]] = {}
self._questions: List[DNSQuestion] = []
self._answers: List[DNSRecord] = []
self.id = 0
self._num_questions = 0
self._num_answers = 0
self._num_authorities = 0
self._num_additionals = 0
self.valid = False
self._did_read_others = False
self.now = now or current_time_millis()
self.source = source
self.scope_id = scope_id
self._has_qu_question = False
try:
self._initial_parse()
except DECODE_EXCEPTIONS:
self._log_exception_debug(
'Received invalid packet from %s at offset %d while unpacking %r',
self.source,
self.offset,
self.data,
)
def is_query(self) -> bool:
"""Returns true if this is a query."""
return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_QUERY
def is_response(self) -> bool:
"""Returns true if this is a response."""
return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_RESPONSE
def has_qu_question(self) -> bool:
"""Returns true if any question is a QU question."""
return self._has_qu_question
@property
def truncated(self) -> bool:
"""Returns true if this is a truncated."""
return (self.flags & _FLAGS_TC) == _FLAGS_TC
@property
def questions(self) -> List[DNSQuestion]:
"""Questions in the packet."""
return self._questions
@property
def num_questions(self) -> int:
"""Number of questions in the packet."""
return self._num_questions
@property
def num_answers(self) -> int:
"""Number of answers in the packet."""
return self._num_answers
@property
def num_authorities(self) -> int:
"""Number of authorities in the packet."""
return self._num_authorities
@property
def num_additionals(self) -> int:
"""Number of additionals in the packet."""
return self._num_additionals
def _initial_parse(self) -> None:
"""Parse the data needed to initalize the packet object."""
self._read_header()
self._read_questions()
if not self._num_questions:
self._read_others()
self.valid = True
@classmethod
def _log_exception_debug(cls, *logger_data: Any) -> None:
log_exc_info = False
exc_info = sys.exc_info()
exc_str = str(exc_info[1])
if exc_str not in _seen_logs:
# log the trace only on the first time
_seen_logs[exc_str] = exc_info
log_exc_info = True
log.debug(*(logger_data or ['Exception occurred']), exc_info=log_exc_info)
def answers(self) -> List[DNSRecord]:
"""Answers in the packet."""
if not self._did_read_others:
try:
self._read_others()
except DECODE_EXCEPTIONS:
self._log_exception_debug(
'Received invalid packet from %s at offset %d while unpacking %r',
self.source,
self.offset,
self.data,
)
return self._answers
def is_probe(self) -> bool:
"""Returns true if this is a probe."""
return self._num_authorities > 0
def __repr__(self) -> str:
return '<DNSIncoming:{%s}>' % ', '.join(
[
'id=%s' % self.id,
'flags=%s' % self.flags,
'truncated=%s' % self.truncated,
'n_q=%s' % self._num_questions,
'n_ans=%s' % self._num_answers,
'n_auth=%s' % self._num_authorities,
'n_add=%s' % self._num_additionals,
'questions=%s' % self._questions,
'answers=%s' % self.answers(),
]
)
def _read_header(self) -> None:
"""Reads header portion of packet"""
view = self.view
offset = self.offset
self.offset += 12
# The header has 6 unsigned shorts in network order
self.id = view[offset] << 8 | view[offset + 1]
self.flags = view[offset + 2] << 8 | view[offset + 3]
self._num_questions = view[offset + 4] << 8 | view[offset + 5]
self._num_answers = view[offset + 6] << 8 | view[offset + 7]
self._num_authorities = view[offset + 8] << 8 | view[offset + 9]
self._num_additionals = view[offset + 10] << 8 | view[offset + 11]
def _read_questions(self) -> None:
"""Reads questions section of packet"""
view = self.view
questions = self._questions
for _ in range(self._num_questions):
name = self._read_name()
offset = self.offset
self.offset += 4
# The question has 2 unsigned shorts in network order
type_ = view[offset] << 8 | view[offset + 1]
class_ = view[offset + 2] << 8 | view[offset + 3]
question = DNSQuestion(name, type_, class_)
if question.unique: # QU questions use the same bit as unique
self._has_qu_question = True
questions.append(question)
def _read_character_string(self) -> str:
"""Reads a character string from the packet"""
length = self.view[self.offset]
self.offset += 1
info = self.data[self.offset : self.offset + length].decode('utf-8', 'replace')
self.offset += length
return info
def _read_string(self, length: _int) -> bytes:
"""Reads a string of a given length from the packet"""
info = self.data[self.offset : self.offset + length]
self.offset += length
return info
def _read_others(self) -> None:
"""Reads the answers, authorities and additionals section of the
packet"""
self._did_read_others = True
view = self.view
n = self._num_answers + self._num_authorities + self._num_additionals
for _ in range(n):
domain = self._read_name()
offset = self.offset
self.offset += 10
# type_, class_ and length are unsigned shorts in network order
# ttl is an unsigned long in network order https://www.rfc-editor.org/errata/eid2130
type_ = view[offset] << 8 | view[offset + 1]
class_ = view[offset + 2] << 8 | view[offset + 3]
ttl = view[offset + 4] << 24 | view[offset + 5] << 16 | view[offset + 6] << 8 | view[offset + 7]
length = view[offset + 8] << 8 | view[offset + 9]
end = self.offset + length
rec = None
try:
rec = self._read_record(domain, type_, class_, ttl, length)
except DECODE_EXCEPTIONS:
# Skip records that fail to decode if we know the length
# If the packet is really corrupt read_name and the unpack
# above would fail and hit the exception catch in read_others
self.offset = end
log.debug(
'Unable to parse; skipping record for %s with type %s at offset %d while unpacking %r',
domain,
_TYPES.get(type_, type_),
self.offset,
self.data,
exc_info=True,
)
if rec is not None:
self._answers.append(rec)
def _read_record(
self, domain: _str, type_: _int, class_: _int, ttl: _int, length: _int
) -> Optional[DNSRecord]:
"""Read known records types and skip unknown ones."""
if type_ == _TYPE_A:
return DNSAddress(domain, type_, class_, ttl, self._read_string(4), None, self.now)
if type_ in (_TYPE_CNAME, _TYPE_PTR):
return DNSPointer(domain, type_, class_, ttl, self._read_name(), self.now)
if type_ == _TYPE_TXT:
return DNSText(domain, type_, class_, ttl, self._read_string(length), self.now)
if type_ == _TYPE_SRV:
view = self.view
offset = self.offset
self.offset += 6
# The SRV record has 3 unsigned shorts in network order
priority = view[offset] << 8 | view[offset + 1]
weight = view[offset + 2] << 8 | view[offset + 3]
port = view[offset + 4] << 8 | view[offset + 5]
return DNSService(
domain,
type_,
class_,
ttl,
priority,
weight,
port,
self._read_name(),
self.now,
)
if type_ == _TYPE_HINFO:
return DNSHinfo(
domain,
type_,
class_,
ttl,
self._read_character_string(),
self._read_character_string(),
self.now,
)
if type_ == _TYPE_AAAA:
return DNSAddress(domain, type_, class_, ttl, self._read_string(16), self.scope_id, self.now)
if type_ == _TYPE_NSEC:
name_start = self.offset
return DNSNsec(
domain,
type_,
class_,
ttl,
self._read_name(),
self._read_bitmap(name_start + length),
self.now,
)
# Try to ignore types we don't know about
# Skip the payload for the resource record so the next
# records can be parsed correctly
self.offset += length
return None
def _read_bitmap(self, end: _int) -> List[int]:
"""Reads an NSEC bitmap from the packet."""
rdtypes = []
view = self.view
while self.offset < end:
offset = self.offset
offset_plus_one = offset + 1
offset_plus_two = offset + 2
window = view[offset]
bitmap_length = view[offset_plus_one]
bitmap_end = offset_plus_two + bitmap_length
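# Each set bit encodes one rdtype: window * 256 + byte_index * 8 + bit_position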
for i, byte in enumerate(self.data[offset_plus_two:bitmap_end]):
for bit in range(0, 8):
if byte & (0x80 >> bit):
rdtypes.append(bit + window * 256 + i * 8)
self.offset += 2 + bitmap_length
return rdtypes
def _read_name(self) -> str:
"""Reads a domain name from the packet."""
labels: List[str] = []
seen_pointers: Set[int] = set()
original_offset = self.offset
self.offset = self._decode_labels_at_offset(original_offset, labels, seen_pointers)
self._name_cache[original_offset] = labels
name = ".".join(labels) + "."
if len(name) > MAX_NAME_LENGTH:
raise IncomingDecodeError(
f"DNS name {name} exceeds maximum length of {MAX_NAME_LENGTH} from {self.source}"
)
return name
def _decode_labels_at_offset(self, off: _int, labels: List[str], seen_pointers: Set[int]) -> int:
# This is a tight loop that is called frequently, small optimizations can make a difference.
view = self.view
while off < self._data_len:
length = view[off]
if length == 0:
return off + DNS_COMPRESSION_HEADER_LEN
if length < 0x40:
label_idx = off + DNS_COMPRESSION_HEADER_LEN
labels.append(self.data[label_idx : label_idx + length].decode('utf-8', 'replace'))
off += DNS_COMPRESSION_HEADER_LEN + length
continue
if length < 0xC0:
raise IncomingDecodeError(
f"DNS compression type {length} is unknown at {off} from {self.source}"
)
# We have a DNS compression pointer
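# The pointer packs a 14-bit offset: the low 6 bits of this byte are the
# high-order bits and the next byte supplies the low-order 8 bits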
link_data = view[off + 1]
link = (length & 0x3F) * 256 + link_data
link_py_int = link
if link > self._data_len:
raise IncomingDecodeError(
f"DNS compression pointer at {off} points to {link} beyond packet from {self.source}"
)
if link == off:
raise IncomingDecodeError(
f"DNS compression pointer at {off} points to itself from {self.source}"
)
if link_py_int in seen_pointers:
raise IncomingDecodeError(
f"DNS compression pointer at {off} was seen again from {self.source}"
)
linked_labels = self._name_cache.get(link_py_int)
if not linked_labels:
linked_labels = []
seen_pointers.add(link_py_int)
self._decode_labels_at_offset(link, linked_labels, seen_pointers)
self._name_cache[link_py_int] = linked_labels
labels.extend(linked_labels)
if len(labels) > MAX_DNS_LABELS:
raise IncomingDecodeError(
f"Maximum dns labels reached while processing pointer at {off} from {self.source}"
)
return off + DNS_COMPRESSION_POINTER_LEN
raise IncomingDecodeError(f"Corrupt packet received while decoding name from {self.source}")

@@ -0,0 +1,142 @@
import cython
from .._dns cimport DNSEntry, DNSPointer, DNSQuestion, DNSRecord
from .incoming cimport DNSIncoming
cdef cython.uint _CLASS_UNIQUE
cdef cython.uint _DNS_PACKET_HEADER_LEN
cdef cython.uint _FLAGS_QR_MASK
cdef cython.uint _FLAGS_QR_QUERY
cdef cython.uint _FLAGS_QR_RESPONSE
cdef cython.uint _FLAGS_TC
cdef cython.uint _MAX_MSG_ABSOLUTE
cdef cython.uint _MAX_MSG_TYPICAL
cdef bint TYPE_CHECKING
cdef unsigned int SHORT_CACHE_MAX
cdef object PACK_BYTE
cdef object PACK_SHORT
cdef object PACK_LONG
cdef unsigned int STATE_INIT
cdef unsigned int STATE_FINISHED
cdef object LOGGING_IS_ENABLED_FOR
cdef object LOGGING_DEBUG
cdef cython.tuple BYTE_TABLE
cdef cython.tuple SHORT_LOOKUP
cdef cython.dict LONG_LOOKUP
cdef class DNSOutgoing:
cdef public unsigned int flags
cdef public bint finished
cdef public object id
cdef public bint multicast
cdef public cython.list packets_data
cdef public cython.dict names
cdef public cython.list data
cdef public unsigned int size
cdef public bint allow_long
cdef public unsigned int state
cdef public cython.list questions
cdef public cython.list answers
cdef public cython.list authorities
cdef public cython.list additionals
cpdef void _reset_for_next_packet(self)
cdef void _write_byte(self, cython.uint value)
cdef void _insert_short_at_start(self, unsigned int value)
cdef void _replace_short(self, cython.uint index, cython.uint value)
cdef _get_short(self, cython.uint value)
cdef void _write_int(self, object value)
cdef cython.bint _write_question(self, DNSQuestion question)
@cython.locals(
d=cython.bytes,
data_view=cython.list,
index=cython.uint,
length=cython.uint
)
cdef cython.bint _write_record(self, DNSRecord record, double now)
@cython.locals(class_=cython.uint)
cdef void _write_record_class(self, DNSEntry record)
@cython.locals(
start_size_int=object
)
cdef cython.bint _check_data_limit_or_rollback(self, cython.uint start_data_length, cython.uint start_size)
@cython.locals(questions_written=cython.uint)
cdef cython.uint _write_questions_from_offset(self, unsigned int questions_offset)
@cython.locals(answers_written=cython.uint)
cdef cython.uint _write_answers_from_offset(self, unsigned int answer_offset)
@cython.locals(records_written=cython.uint)
cdef cython.uint _write_records_from_offset(self, cython.list records, unsigned int offset)
cdef bint _has_more_to_add(self, unsigned int questions_offset, unsigned int answer_offset, unsigned int authority_offset, unsigned int additional_offset)
cdef void _write_ttl(self, DNSRecord record, double now)
@cython.locals(
labels=cython.list,
label=cython.str,
index=cython.uint,
start_size=cython.uint,
name_length=cython.uint,
)
cpdef void write_name(self, cython.str name)
cdef void _write_link_to_name(self, unsigned int index)
cpdef void write_short(self, cython.uint value)
cpdef void write_string(self, cython.bytes value)
@cython.locals(utfstr=bytes)
cdef void _write_utf(self, cython.str value)
@cython.locals(
debug_enable=bint,
made_progress=bint,
has_more_to_add=bint,
questions_offset="unsigned int",
answer_offset="unsigned int",
authority_offset="unsigned int",
additional_offset="unsigned int",
questions_written="unsigned int",
answers_written="unsigned int",
authorities_written="unsigned int",
additionals_written="unsigned int",
)
cpdef packets(self)
cpdef void add_question(self, DNSQuestion question)
cpdef void add_answer(self, DNSIncoming inp, DNSRecord record)
@cython.locals(now_double=double)
cpdef void add_answer_at_time(self, DNSRecord record, double now)
cpdef void add_authorative_answer(self, DNSPointer record)
cpdef void add_additional_answer(self, DNSRecord record)
cpdef bint is_query(self)
cpdef bint is_response(self)

@@ -0,0 +1,498 @@
""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine
This module provides a framework for the use of DNS Service Discovery
using IP multicast.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""
import enum
import logging
from struct import Struct
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Tuple, Union
from .._dns import DNSPointer, DNSQuestion, DNSRecord
from .._exceptions import NamePartTooLongException
from .._logger import log
from ..const import (
_CLASS_UNIQUE,
_DNS_HOST_TTL,
_DNS_OTHER_TTL,
_DNS_PACKET_HEADER_LEN,
_FLAGS_QR_MASK,
_FLAGS_QR_QUERY,
_FLAGS_QR_RESPONSE,
_FLAGS_TC,
_MAX_MSG_ABSOLUTE,
_MAX_MSG_TYPICAL,
)
from .incoming import DNSIncoming
str_ = str
float_ = float
int_ = int
bytes_ = bytes
DNSQuestion_ = DNSQuestion
DNSRecord_ = DNSRecord
PACK_BYTE = Struct('>B').pack
PACK_SHORT = Struct('>H').pack
PACK_LONG = Struct('>L').pack
SHORT_CACHE_MAX = 128
BYTE_TABLE = tuple(PACK_BYTE(i) for i in range(256))
SHORT_LOOKUP = tuple(PACK_SHORT(i) for i in range(SHORT_CACHE_MAX))
LONG_LOOKUP = {i: PACK_LONG(i) for i in (_DNS_OTHER_TTL, _DNS_HOST_TTL, 0)}
class State(enum.Enum):
init = 0
finished = 1
STATE_INIT = State.init.value
STATE_FINISHED = State.finished.value
LOGGING_IS_ENABLED_FOR = log.isEnabledFor
LOGGING_DEBUG = logging.DEBUG
class DNSOutgoing:
"""Object representation of an outgoing packet"""
__slots__ = (
'flags',
'finished',
'id',
'multicast',
'packets_data',
'names',
'data',
'size',
'allow_long',
'state',
'questions',
'answers',
'authorities',
'additionals',
)
def __init__(self, flags: int, multicast: bool = True, id_: int = 0) -> None:
self.flags = flags
self.finished = False
self.id = id_
self.multicast = multicast
self.packets_data: List[bytes] = []
# these 3 are per-packet -- see also _reset_for_next_packet()
self.names: Dict[str, int] = {}
self.data: List[bytes] = []
self.size: int = _DNS_PACKET_HEADER_LEN
self.allow_long: bool = True
self.state = STATE_INIT
self.questions: List[DNSQuestion] = []
self.answers: List[Tuple[DNSRecord, float]] = []
self.authorities: List[DNSPointer] = []
self.additionals: List[DNSRecord] = []
def is_query(self) -> bool:
"""Returns true if this is a query."""
return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_QUERY
def is_response(self) -> bool:
"""Returns true if this is a response."""
return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_RESPONSE
def _reset_for_next_packet(self) -> None:
self.names = {}
self.data = []
self.size = _DNS_PACKET_HEADER_LEN
self.allow_long = True
def __repr__(self) -> str:
return '<DNSOutgoing:{%s}>' % ', '.join(
[
'multicast=%s' % self.multicast,
'flags=%s' % self.flags,
'questions=%s' % self.questions,
'answers=%s' % self.answers,
'authorities=%s' % self.authorities,
'additionals=%s' % self.additionals,
]
)
def add_question(self, record: DNSQuestion) -> None:
"""Adds a question"""
self.questions.append(record)
def add_answer(self, inp: DNSIncoming, record: DNSRecord) -> None:
"""Adds an answer"""
if not record.suppressed_by(inp):
self.add_answer_at_time(record, 0.0)
def add_answer_at_time(self, record: Optional[DNSRecord], now: float_) -> None:
"""Adds an answer if it does not expire by a certain time"""
now_double = now
if record is not None and (now_double == 0 or not record.is_expired(now_double)):
self.answers.append((record, now))
def add_authorative_answer(self, record: DNSPointer) -> None:
"""Adds an authoritative answer"""
self.authorities.append(record)
def add_additional_answer(self, record: DNSRecord) -> None:
"""Adds an additional answer
From: RFC 6763, DNS-Based Service Discovery, February 2013
12. DNS Additional Record Generation
DNS has an efficiency feature whereby a DNS server may place
additional records in the additional section of the DNS message.
These additional records are records that the client did not
explicitly request, but the server has reasonable grounds to expect
that the client might request them shortly, so including them can
save the client from having to issue additional queries.
This section recommends which additional records SHOULD be generated
to improve network efficiency, for both Unicast and Multicast DNS-SD
responses.
12.1. PTR Records
When including a DNS-SD Service Instance Enumeration or Selective
Instance Enumeration (subtype) PTR record in a response packet, the
server/responder SHOULD include the following additional records:
o The SRV record(s) named in the PTR rdata.
o The TXT record(s) named in the PTR rdata.
o All address records (type "A" and "AAAA") named in the SRV rdata.
12.2. SRV Records
When including an SRV record in a response packet, the
server/responder SHOULD include the following additional records:
o All address records (type "A" and "AAAA") named in the SRV rdata.
"""
self.additionals.append(record)
def _write_byte(self, value: int_) -> None:
"""Writes a single byte to the packet"""
self.data.append(BYTE_TABLE[value])
self.size += 1
def _get_short(self, value: int_) -> bytes:
"""Convert an unsigned short to 2 bytes."""
return SHORT_LOOKUP[value] if value < SHORT_CACHE_MAX else PACK_SHORT(value)
def _insert_short_at_start(self, value: int_) -> None:
"""Inserts an unsigned short at the start of the packet"""
self.data.insert(0, self._get_short(value))
def _replace_short(self, index: int_, value: int_) -> None:
"""Replaces an unsigned short in a certain position in the packet"""
self.data[index] = self._get_short(value)
def write_short(self, value: int_) -> None:
"""Writes an unsigned short to the packet"""
self.data.append(self._get_short(value))
self.size += 2
def _write_int(self, value: Union[float, int]) -> None:
"""Writes an unsigned integer to the packet"""
value_as_int = int(value)
long_bytes = LONG_LOOKUP.get(value_as_int)
if long_bytes is not None:
self.data.append(long_bytes)
else:
self.data.append(PACK_LONG(value_as_int))
self.size += 4
def write_string(self, value: bytes_) -> None:
"""Writes a string to the packet"""
if TYPE_CHECKING:
assert isinstance(value, bytes)
self.data.append(value)
self.size += len(value)
def _write_utf(self, s: str_) -> None:
"""Writes a UTF-8 string of a given length to the packet"""
utfstr = s.encode('utf-8')
length = len(utfstr)
if length > 64:
raise NamePartTooLongException
self._write_byte(length)
self.write_string(utfstr)
def write_character_string(self, value: bytes) -> None:
if TYPE_CHECKING:
assert isinstance(value, bytes)
length = len(value)
if length > 256:
raise NamePartTooLongException
self._write_byte(length)
self.write_string(value)
def write_name(self, name: str_) -> None:
"""
Write names to packet
18.14. Name Compression
When generating Multicast DNS messages, implementations SHOULD use
name compression wherever possible to compress the names of resource
records, by replacing some or all of the resource record name with a
compact two-byte reference to an appearance of that data somewhere
earlier in the message [RFC1035].
"""
# split name into each label
if name.endswith('.'):
name = name[:-1]
index = self.names.get(name, 0)
if index:
self._write_link_to_name(index)
return
start_size = self.size
labels = name.split('.')
# Write each new label or a pointer to the existing one in the packet
self.names[name] = start_size
self._write_utf(labels[0])
name_length = 0
for count in range(1, len(labels)):
partial_name = '.'.join(labels[count:])
index = self.names.get(partial_name, 0)
if index:
self._write_link_to_name(index)
return
if name_length == 0:
name_length = len(name.encode('utf-8'))
self.names[partial_name] = start_size + name_length - len(partial_name.encode('utf-8'))
self._write_utf(labels[count])
# this is the end of a name
self._write_byte(0)
def _write_link_to_name(self, index: int_) -> None:
# If part of the name already exists in the packet,
# create a pointer to it
self._write_byte((index >> 8) | 0xC0)
self._write_byte(index & 0xFF)
def _write_question(self, question: DNSQuestion_) -> bool:
"""Writes a question to the packet"""
start_data_length = len(self.data)
start_size = self.size
self.write_name(question.name)
self.write_short(question.type)
self._write_record_class(question)
return self._check_data_limit_or_rollback(start_data_length, start_size)
def _write_record_class(self, record: Union[DNSQuestion_, DNSRecord_]) -> None:
"""Write out the record class including the unique/unicast (QU) bit."""
class_ = record.class_
if record.unique is True and self.multicast:
self.write_short(class_ | _CLASS_UNIQUE)
else:
self.write_short(class_)
def _write_ttl(self, record: DNSRecord_, now: float_) -> None:
"""Write out the record ttl."""
self._write_int(record.ttl if now == 0 else record.get_remaining_ttl(now))
def _write_record(self, record: DNSRecord_, now: float_) -> bool:
"""Writes a record (answer, authoritative answer, additional) to
the packet. Returns True on success, or False if we did not
because the packet because the record does not fit."""
start_data_length = len(self.data)
start_size = self.size
self.write_name(record.name)
self.write_short(record.type)
self._write_record_class(record)
self._write_ttl(record, now)
index = len(self.data)
self.write_short(0) # Will get replaced with the actual size
record.write(self)
# Adjust size for the short we will write before this record
length = 0
for d in self.data[index + 1 :]:
length += len(d)
# Here we replace the 0 length short we wrote
# before with the actual length
self._replace_short(index, length)
return self._check_data_limit_or_rollback(start_data_length, start_size)
def _check_data_limit_or_rollback(self, start_data_length: int_, start_size: int_) -> bool:
"""Check data limit, if we go over, then rollback and return False."""
len_limit = _MAX_MSG_ABSOLUTE if self.allow_long else _MAX_MSG_TYPICAL
self.allow_long = False
if self.size <= len_limit:
return True
if LOGGING_IS_ENABLED_FOR(LOGGING_DEBUG): # pragma: no branch
log.debug("Reached data limit (size=%d) > (limit=%d) - rolling back", self.size, len_limit)
del self.data[start_data_length:]
self.size = start_size
start_size_int = start_size
rollback_names = [name for name, idx in self.names.items() if idx >= start_size_int]
for name in rollback_names:
del self.names[name]
return False
def _write_questions_from_offset(self, questions_offset: int_) -> int:
questions_written = 0
for question in self.questions[questions_offset:]:
if not self._write_question(question):
break
questions_written += 1
return questions_written
def _write_answers_from_offset(self, answer_offset: int_) -> int:
answers_written = 0
for answer, time_ in self.answers[answer_offset:]:
if not self._write_record(answer, time_):
break
answers_written += 1
return answers_written
def _write_records_from_offset(self, records: Sequence[DNSRecord], offset: int_) -> int:
records_written = 0
for record in records[offset:]:
if not self._write_record(record, 0):
break
records_written += 1
return records_written
def _has_more_to_add(
self, questions_offset: int_, answer_offset: int_, authority_offset: int_, additional_offset: int_
) -> bool:
"""Check if all questions, answers, authority, and additionals have been written to the packet."""
return (
questions_offset < len(self.questions)
or answer_offset < len(self.answers)
or authority_offset < len(self.authorities)
or additional_offset < len(self.additionals)
)
def packets(self) -> List[bytes]:
"""Returns a list of bytestrings containing the packets' bytes
No further parts should be added to the packet once this
is done. The packets are each restricted to _MAX_MSG_TYPICAL
or less in length, except for the case of a single answer which
will be written out to a single oversized packet no more than
_MAX_MSG_ABSOLUTE in length (and hence will be subject to IP
fragmentation potentially)."""
packets_data = self.packets_data
if self.state == STATE_FINISHED:
return packets_data
questions_offset = 0
answer_offset = 0
authority_offset = 0
additional_offset = 0
# we have to at least write out the question
debug_enable = LOGGING_IS_ENABLED_FOR(LOGGING_DEBUG) is True
has_more_to_add = True
while has_more_to_add:
if debug_enable:
log.debug(
"offsets = questions=%d, answers=%d, authorities=%d, additionals=%d",
questions_offset,
answer_offset,
authority_offset,
additional_offset,
)
log.debug(
"lengths = questions=%d, answers=%d, authorities=%d, additionals=%d",
len(self.questions),
len(self.answers),
len(self.authorities),
len(self.additionals),
)
questions_written = self._write_questions_from_offset(questions_offset)
answers_written = self._write_answers_from_offset(answer_offset)
authorities_written = self._write_records_from_offset(self.authorities, authority_offset)
additionals_written = self._write_records_from_offset(self.additionals, additional_offset)
made_progress = bool(self.data)
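# The four counts are inserted at the front in reverse order, so the finished
# header reads qdcount, ancount, nscount, arcount once flags and id are added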
self._insert_short_at_start(additionals_written)
self._insert_short_at_start(authorities_written)
self._insert_short_at_start(answers_written)
self._insert_short_at_start(questions_written)
questions_offset += questions_written
answer_offset += answers_written
authority_offset += authorities_written
additional_offset += additionals_written
if debug_enable:
log.debug(
"now offsets = questions=%d, answers=%d, authorities=%d, additionals=%d",
questions_offset,
answer_offset,
authority_offset,
additional_offset,
)
has_more_to_add = self._has_more_to_add(
questions_offset, answer_offset, authority_offset, additional_offset
)
if has_more_to_add and self.is_query():
# https://datatracker.ietf.org/doc/html/rfc6762#section-7.2
if debug_enable: # pragma: no branch
log.debug("Setting TC flag")
self._insert_short_at_start(self.flags | _FLAGS_TC)
else:
self._insert_short_at_start(self.flags)
if self.multicast:
self._insert_short_at_start(0)
else:
self._insert_short_at_start(self.id)
packets_data.append(b''.join(self.data))
if not made_progress:
# Generating an empty packet is not a desirable outcome, but currently
# too many internals rely on this behavior. So, we'll just return an
# empty packet and log a warning until this can be refactored at a later
# date.
log.warning("packets() made no progress adding records; returning")
break
if has_more_to_add:
self._reset_for_next_packet()
self.state = STATE_FINISHED
return packets_data
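
A matching sketch for the outgoing side: build a single-question query with DNSOutgoing and render it to wire format with packets(). The imports again assume upstream zeroconf's module layout, and the type/class values are written as plain integers rather than pulled from the const module.

from zeroconf._dns import DNSQuestion
from zeroconf._protocol.outgoing import DNSOutgoing

out = DNSOutgoing(flags=0)  # QR bit clear, so is_query() is True
out.add_question(DNSQuestion("_http._tcp.local.", 12, 1))  # qtype PTR, qclass IN
for packet in out.packets():  # one bytes object per packet, header first
    print(len(packet), packet[:12].hex())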